diff --git a/.bumpversion.cfg b/.bumpversion.cfg
new file mode 100644
index 000000000..479af4af8
--- /dev/null
+++ b/.bumpversion.cfg
@@ -0,0 +1,7 @@
+[bumpversion]
+current_version = 1.3.4
+
+[bumpversion:file:setup.py]
+
+[bumpversion:file:moto/__init__.py]
+
diff --git a/.gitignore b/.gitignore
index 18026d60f..0a24fe476 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,9 @@ build/
 .DS_Store
 python_env
 .ropeproject/
+.pytest_cache/
+venv/
+.python-version
+.vscode/
+tests/file.tmp
+.eggs/
diff --git a/.travis.yml b/.travis.yml
index f1b7ac40d..77dd2ae55 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,37 +1,57 @@
+dist: xenial
 language: python
 sudo: false
 services:
-  - docker
+- docker
 python:
-  - 2.7
-  - 3.6
+- 2.7
+- 3.6
+- 3.7
 env:
-  - TEST_SERVER_MODE=false
-  - TEST_SERVER_MODE=true
+- TEST_SERVER_MODE=false
+- TEST_SERVER_MODE=true
 before_install:
-  - export BOTO_CONFIG=/dev/null
+- export BOTO_CONFIG=/dev/null
 install:
-  # We build moto first so the docker container doesn't try to compile it as well, also note we don't use
-  # -d for docker run so the logs show up in travis
-  # Python images come from here: https://hub.docker.com/_/python/
-  - |
-    python setup.py sdist
+- |
+  python setup.py sdist

-    if [ "$TEST_SERVER_MODE" = "true" ]; then
-      docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh &
-      export AWS_SECRET_ACCESS_KEY=foobar_secret
-      export AWS_ACCESS_KEY_ID=foobar_key
-    fi
-    travis_retry pip install boto==2.45.0
-    travis_retry pip install boto3
-    travis_retry pip install dist/moto*.gz
-    travis_retry pip install coveralls==1.1
-    travis_retry pip install -r requirements-dev.txt
+  if [ "$TEST_SERVER_MODE" = "true" ]; then
+    docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh &
+  fi
+  travis_retry pip install boto==2.45.0
+  travis_retry pip install boto3
+  travis_retry pip install dist/moto*.gz
+  travis_retry pip install coveralls==1.1
+  travis_retry pip install -r requirements-dev.txt

-    if [ "$TEST_SERVER_MODE" = "true" ]; then
-      python wait_for.py
-    fi
+  if [ "$TEST_SERVER_MODE" = "true" ]; then
+    python wait_for.py
+  fi
 script:
-  - make test
+- make test
 after_success:
-  - coveralls
+- coveralls
+before_deploy:
+- git checkout $TRAVIS_BRANCH
+- git fetch --unshallow
+- python update_version_from_git.py
+deploy:
+  - provider: pypi
+    distributions: sdist bdist_wheel
+    user: spulec
+    password:
+      secure: NxnPylnTfekJmGyoufCw0lMoYRskSMJzvAIyAlJJVYKwEhmiCPOrdy5qV8i8mRZ1AkUsqU3jBZ/PD56n96clHW0E3d080UleRDj6JpyALVdeLfMqZl9kLmZ8bqakWzYq3VSJKw2zGP/L4tPGf8wTK1SUv9yl/YNDsBdCkjDverw=
+    on:
+      branch:
+        - master
+    skip_cleanup: true
+    skip_existing: true
+  # - provider: pypi
+  #   distributions: sdist bdist_wheel
+  #   user: spulec
+  #   password:
+  #     secure: NxnPylnTfekJmGyoufCw0lMoYRskSMJzvAIyAlJJVYKwEhmiCPOrdy5qV8i8mRZ1AkUsqU3jBZ/PD56n96clHW0E3d080UleRDj6JpyALVdeLfMqZl9kLmZ8bqakWzYq3VSJKw2zGP/L4tPGf8wTK1SUv9yl/YNDsBdCkjDverw=
+  #   on:
+  #     tags: true
+  #     skip_existing: true
diff --git a/AUTHORS.md b/AUTHORS.md
index f4160146c..01b000182 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -47,3 +47,13 @@ Moto is written by Steve Pulec with contributions from:
 * [Adam Stauffer](https://github.com/adamstauffer)
 * [Guy Templeton](https://github.com/gjtempleton)
 * [Michael van Tellingen](https://github.com/mvantellingen)
+* [Jessie Nadler](https://github.com/nadlerjessie)
+* [Alex Morken](https://github.com/alexmorken)
+* [Clive Li](https://github.com/cliveli)
+* [Jim Shields](https://github.com/jimjshields)
+* [William Richard](https://github.com/william-richard)
+* [Alex Casalboni](https://github.com/alexcasalboni)
+* [Jon Beilke](https://github.com/jrbeilke)
+* [Bendeguz Acs](https://github.com/acsbendi)
+* [Craig Anderson](https://github.com/craiga)
+* [Robert Lewis](https://github.com/ralewis85)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 069569c5c..f42619b33 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,95 @@
 Moto Changelog
 ===================

-Latest
+1.3.7
+-----
+
+ * Switch from mocking requests to using before-send for AWS calls
+
+1.3.6
+-----
+
+ * Fix boto3 pinning.
+
+1.3.5
+-----
+
+ * Pin down botocore issue as temporary fix for #1793.
+ * More features on secrets manager
+
+1.3.4
 ------
+
+ * IAM get account authorization details
+ * adding account id to ManagedPolicy ARN
+ * APIGateway usage plans and usage plan keys
+ * ECR list images
+
+1.3.3
+------
+
+ * Fix a regression in S3 url regexes
+ * APIGateway region fixes
+ * ECS improvements
+ * Add @mock_cognitoidentity, thanks to @brcoding
+
+
+1.3.2
+------
+The huge change in this version is that the responses library is no longer vendored. Many developers are now unblocked. Kudos to @spulec for the fix.
+
+ * Fix route53 TTL bug
+ * Added filtering support for S3 lifecycle
+ * unvendoring responses
+
+1.3.0
+------
+
+Dozens of major endpoint additions in this release. Highlights include:
+
+ * Fixed AMI tests and the Travis build setup
+ * SNS improvements
+ * Dynamodb improvements
+ * EBS improvements
+ * Redshift improvements
+ * RDS snapshot improvements
+ * S3 improvements
+ * Cloudwatch improvements
+ * SSM improvements
+ * IAM improvements
+ * ELBV1 and ELBV2 improvements
+ * Lambda improvements
+ * EC2 spot pricing improvements
+ * ApiGateway improvements
+ * VPC improvements
+
+1.2.0
+------
+
+ * Supports filtering AMIs by self
+ * Implemented signal_workflow_execution for SWF
+ * Wired SWF backend to the moto server
+ * Added url decoding to x-amz-copy-source header for copying S3 files
+ * Revamped lambda function storage to do versioning
+ * IOT improvements
+ * RDS improvements
+ * Implemented CloudWatch get_metric_statistics
+ * Improved Cloudformation EC2 support
+ * Implemented Cloudformation change_set endpoints
+
+1.1.25
+-----
+
+ * Implemented Iot and Iot-data
+ * Implemented resource tagging API
+ * EC2 AMIs now have owners
+ * Improve codegen scaffolding
+ * Many small fixes to EC2 support
+ * CloudFormation ELBv2 support
+ * UTF fixes for S3
+ * Implemented SSM get_parameters_by_path
+ * More advanced Dynamodb querying
+
 1.1.24
 -----
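The headline 1.3.7 item above, switching from mocking the `requests` library to botocore's `before-send` hook, leans on documented botocore behavior: when a handler registered for the `before-send` event returns anything other than `None`, botocore uses that return value as the HTTP response and never sends the request over the wire. Below is a minimal, self-contained sketch of that mechanism only, assuming nothing about moto's internals; the credentials and the canned XML body are placeholders.

```python
import io

import boto3
from botocore.awsrequest import AWSResponse


class CannedRawResponse(io.BytesIO):
    """Stand-in for a raw HTTP response; AWSResponse.content calls raw.stream()."""

    def stream(self, **kwargs):
        contents = self.read()
        while contents:
            yield contents
            contents = self.read()


def return_canned_response(request, **kwargs):
    # Returning a non-None value from a before-send handler makes botocore
    # treat it as the HTTP response, so no real AWS call is made.
    body = b"<ListAllMyBucketsResult><Buckets></Buckets></ListAllMyBucketsResult>"
    return AWSResponse(request.url, 200, {}, CannedRawResponse(body))


s3 = boto3.client(
    "s3",
    region_name="us-east-1",
    aws_access_key_id="dummy",  # placeholder credentials; nothing leaves the machine
    aws_secret_access_key="dummy",
)
s3.meta.events.register("before-send.s3.ListBuckets", return_canned_response)
print(s3.list_buckets().get("Buckets", []))  # [] - parsed from the canned XML
```

The canned response still flows through botocore's normal response parsing and error handling, which is presumably the appeal over patching `requests` directly.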
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1266d508e..40da55ccf 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,4 +1,29 @@
 ### Contributing code

-If you have improvements to Moto, send us your pull requests! For those
-just getting started, Github has a [howto](https://help.github.com/articles/using-pull-requests/).
+Moto has a [Code of Conduct](https://github.com/spulec/moto/blob/master/CODE_OF_CONDUCT.md). You can expect to be treated with respect at all times when interacting with this project.
+
+## Running the tests locally
+
+Moto has a Makefile with some helpful commands for getting set up. You should be able to run `make init` to install the dependencies and then `make test` to run the tests.
+
+## Is there a missing feature?
+
+Moto is easier to contribute to than you probably think. There's [a list of which endpoints have been implemented](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md), and we invite you to add new endpoints to existing services or to add new services.
+
+How to teach Moto to support a new AWS endpoint:
+
+* Create an issue describing what's missing. This is where we'll all talk about the new addition and help you get it done.
+* Create a [pull request](https://help.github.com/articles/using-pull-requests/) and mention the issue # in the PR description.
+* Try to add a failing test case. For example, if you're trying to implement `boto3.client('acm').import_certificate()` you'll want to add a new method called `def test_import_certificate` to `tests/test_acm/test_acm.py` (see the sketch below).
+* If you can also implement the code that gets that test passing, that's great. If not, just ask the community for a hand and somebody will assist you.
+
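To make the failing-test bullet concrete, here is a rough sketch of what such a test could look like. It is hypothetical rather than lifted from the repository: `SERVER_CRT` and `SERVER_KEY` are placeholder PEM blobs (a real test needs a valid certificate/key pair, typically loaded from fixture files), while `mock_acm` and the `sure`-style `.should` assertions are what moto's existing tests already use.

```python
# Hypothetical first cut at tests/test_acm/test_acm.py::test_import_certificate.
import boto3
import sure  # noqa -- importing sure enables the .should assertion syntax

from moto import mock_acm

# Placeholder PEM data; substitute a valid certificate/key pair.
SERVER_CRT = b"-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"
SERVER_KEY = b"-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----\n"


@mock_acm
def test_import_certificate():
    client = boto3.client("acm", region_name="us-east-1")

    # This call errors out until the import_certificate endpoint is
    # implemented, which is exactly the failing start you want.
    resp = client.import_certificate(Certificate=SERVER_CRT, PrivateKey=SERVER_KEY)

    resp.should.have.key("CertificateArn")
```

Once the backend grows an `import_certificate` response, the same test doubles as the regression check for the new endpoint.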
+# Maintainers
+
+## Releasing a new version of Moto
+
+You'll need a PyPI account and a Docker Hub account to release Moto. After we release a new PyPI package, we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image.
+
+* First, `scripts/bump_version` modifies the version and opens a PR
+* Then, merge the new pull request
+* Finally, generate and ship the new artifacts with `make publish`
+
diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index 172c03f1a..1d9811983 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -1,8 +1,9 @@
-## acm - 50% implemented
+## acm - 41% implemented
 - [X] add_tags_to_certificate
 - [X] delete_certificate
 - [ ] describe_certificate
+- [ ] export_certificate
 - [X] get_certificate
 - [ ] import_certificate
 - [ ] list_certificates
@@ -10,8 +11,77 @@
 - [X] remove_tags_from_certificate
 - [X] request_certificate
 - [ ] resend_validation_email
+- [ ] update_certificate_options

-## apigateway - 18% implemented
+## acm-pca - 0% implemented
+- [ ] create_certificate_authority
+- [ ] create_certificate_authority_audit_report
+- [ ] delete_certificate_authority
+- [ ] describe_certificate_authority
+- [ ] describe_certificate_authority_audit_report
+- [ ] get_certificate
+- [ ] get_certificate_authority_certificate
+- [ ] get_certificate_authority_csr
+- [ ] import_certificate_authority_certificate
+- [ ] issue_certificate
+- [ ] list_certificate_authorities
+- [ ] list_tags
+- [ ] revoke_certificate
+- [ ] tag_certificate_authority
+- [ ] untag_certificate_authority
+- [ ] update_certificate_authority
+
+## alexaforbusiness - 0% implemented
+- [ ] associate_contact_with_address_book
+- [ ] associate_device_with_room
+- [ ] associate_skill_group_with_room
+- [ ] create_address_book
+- [ ] create_contact
+- [ ] create_profile
+- [ ] create_room
+- [ ] create_skill_group
+- [ ] create_user
+- [ ] delete_address_book
+- [ ] delete_contact
+- [ ] delete_profile
+- [ ] delete_room
+- [ ] delete_room_skill_parameter
+- [ ] delete_skill_group
+- [ ] delete_user
+- [ ] disassociate_contact_from_address_book
+- [ ] disassociate_device_from_room
+- [ ] disassociate_skill_group_from_room
+- [ ] get_address_book
+- [ ] get_contact
+- [ ] get_device
+- [ ] get_profile
+- [ ] get_room
+- [ ] get_room_skill_parameter
+- [ ]
get_skill_group +- [ ] list_skills +- [ ] list_tags +- [ ] put_room_skill_parameter +- [ ] resolve_room +- [ ] revoke_invitation +- [ ] search_address_books +- [ ] search_contacts +- [ ] search_devices +- [ ] search_profiles +- [ ] search_rooms +- [ ] search_skill_groups +- [ ] search_users +- [ ] send_invitation +- [ ] start_device_sync +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_address_book +- [ ] update_contact +- [ ] update_device +- [ ] update_profile +- [ ] update_room +- [ ] update_skill_group + +## apigateway - 24% implemented - [ ] create_api_key - [ ] create_authorizer - [ ] create_base_path_mapping @@ -24,8 +94,9 @@ - [X] create_resource - [X] create_rest_api - [X] create_stage -- [ ] create_usage_plan -- [ ] create_usage_plan_key +- [X] create_usage_plan +- [X] create_usage_plan_key +- [ ] create_vpc_link - [ ] delete_api_key - [ ] delete_authorizer - [ ] delete_base_path_mapping @@ -44,8 +115,9 @@ - [X] delete_resource - [X] delete_rest_api - [ ] delete_stage -- [ ] delete_usage_plan -- [ ] delete_usage_plan_key +- [X] delete_usage_plan +- [X] delete_usage_plan_key +- [ ] delete_vpc_link - [ ] flush_stage_authorizers_cache - [ ] flush_stage_cache - [ ] generate_client_certificate @@ -87,11 +159,14 @@ - [ ] get_sdk_types - [X] get_stage - [X] get_stages +- [ ] get_tags - [ ] get_usage -- [ ] get_usage_plan -- [ ] get_usage_plan_key -- [ ] get_usage_plan_keys -- [ ] get_usage_plans +- [X] get_usage_plan +- [X] get_usage_plan_key +- [X] get_usage_plan_keys +- [X] get_usage_plans +- [ ] get_vpc_link +- [ ] get_vpc_links - [ ] import_api_keys - [ ] import_documentation_parts - [ ] import_rest_api @@ -101,8 +176,10 @@ - [ ] put_method - [ ] put_method_response - [ ] put_rest_api +- [ ] tag_resource - [ ] test_invoke_authorizer - [ ] test_invoke_method +- [ ] untag_resource - [ ] update_account - [ ] update_api_key - [ ] update_authorizer @@ -124,6 +201,7 @@ - [X] update_stage - [ ] update_usage - [ ] update_usage_plan +- [ ] update_vpc_link ## application-autoscaling - 0% implemented - [ ] delete_scaling_policy @@ -139,6 +217,7 @@ ## appstream - 0% implemented - [ ] associate_fleet +- [ ] copy_image - [ ] create_directory_config - [ ] create_fleet - [ ] create_image_builder @@ -160,14 +239,46 @@ - [ ] expire_session - [ ] list_associated_fleets - [ ] list_associated_stacks +- [ ] list_tags_for_resource - [ ] start_fleet - [ ] start_image_builder - [ ] stop_fleet - [ ] stop_image_builder +- [ ] tag_resource +- [ ] untag_resource - [ ] update_directory_config - [ ] update_fleet - [ ] update_stack +## appsync - 0% implemented +- [ ] create_api_key +- [ ] create_data_source +- [ ] create_graphql_api +- [ ] create_resolver +- [ ] create_type +- [ ] delete_api_key +- [ ] delete_data_source +- [ ] delete_graphql_api +- [ ] delete_resolver +- [ ] delete_type +- [ ] get_data_source +- [ ] get_graphql_api +- [ ] get_introspection_schema +- [ ] get_resolver +- [ ] get_schema_creation_status +- [ ] get_type +- [ ] list_api_keys +- [ ] list_data_sources +- [ ] list_graphql_apis +- [ ] list_resolvers +- [ ] list_types +- [ ] start_schema_creation +- [ ] update_api_key +- [ ] update_data_source +- [ ] update_graphql_api +- [ ] update_resolver +- [ ] update_type + ## athena - 0% implemented - [ ] batch_get_named_query - [ ] batch_get_query_execution @@ -181,7 +292,7 @@ - [ ] start_query_execution - [ ] stop_query_execution -## autoscaling - 42% implemented +## autoscaling - 44% implemented - [X] attach_instances - [X] attach_load_balancer_target_groups - [X] attach_load_balancers @@ 
-231,10 +342,16 @@ - [X] set_desired_capacity - [X] set_instance_health - [ ] set_instance_protection -- [ ] suspend_processes +- [X] suspend_processes - [ ] terminate_instance_in_auto_scaling_group - [X] update_auto_scaling_group +## autoscaling-plans - 0% implemented +- [ ] create_scaling_plan +- [ ] delete_scaling_plan +- [ ] describe_scaling_plan_resources +- [ ] describe_scaling_plans + ## batch - 93% implemented - [ ] cancel_job - [X] create_compute_environment @@ -268,6 +385,26 @@ - [ ] update_notification - [ ] update_subscriber +## ce - 0% implemented +- [ ] get_cost_and_usage +- [ ] get_dimension_values +- [ ] get_reservation_coverage +- [ ] get_reservation_purchase_recommendation +- [ ] get_reservation_utilization +- [ ] get_tags + +## cloud9 - 0% implemented +- [ ] create_environment_ec2 +- [ ] create_environment_membership +- [ ] delete_environment +- [ ] delete_environment_membership +- [ ] describe_environment_memberships +- [ ] describe_environment_status +- [ ] describe_environments +- [ ] list_environments +- [ ] update_environment +- [ ] update_environment_membership + ## clouddirectory - 0% implemented - [ ] add_facet_to_object - [ ] apply_schema @@ -294,8 +431,10 @@ - [ ] detach_typed_link - [ ] disable_directory - [ ] enable_directory +- [ ] get_applied_schema_version - [ ] get_directory - [ ] get_facet +- [ ] get_object_attributes - [ ] get_object_information - [ ] get_schema_as_json - [ ] get_typed_link_facet_information @@ -328,77 +467,105 @@ - [ ] update_object_attributes - [ ] update_schema - [ ] update_typed_link_facet +- [ ] upgrade_applied_schema +- [ ] upgrade_published_schema -## cloudformation - 17% implemented +## cloudformation - 65% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback -- [ ] create_change_set +- [X] create_change_set - [X] create_stack -- [ ] create_stack_instances -- [ ] create_stack_set -- [ ] delete_change_set +- [X] create_stack_instances +- [X] create_stack_set +- [X] delete_change_set - [X] delete_stack -- [ ] delete_stack_instances -- [ ] delete_stack_set +- [X] delete_stack_instances +- [X] delete_stack_set +- [ ] deploy - [ ] describe_account_limits -- [ ] describe_change_set -- [ ] describe_stack_events -- [ ] describe_stack_instance -- [ ] describe_stack_resource -- [ ] describe_stack_resources -- [ ] describe_stack_set -- [ ] describe_stack_set_operation +- [X] describe_change_set +- [ ] describe_stack_drift_detection_status +- [X] describe_stack_events +- [X] describe_stack_instance +- [X] describe_stack_resource +- [ ] describe_stack_resource_drifts +- [X] describe_stack_resources +- [X] describe_stack_set +- [X] describe_stack_set_operation - [X] describe_stacks +- [ ] detect_stack_drift +- [ ] detect_stack_resource_drift - [ ] estimate_template_cost -- [ ] execute_change_set +- [X] execute_change_set - [ ] get_stack_policy -- [ ] get_template +- [X] get_template - [ ] get_template_summary -- [ ] list_change_sets +- [X] list_change_sets - [X] list_exports - [ ] list_imports -- [ ] list_stack_instances +- [X] list_stack_instances - [X] list_stack_resources -- [ ] list_stack_set_operation_results -- [ ] list_stack_set_operations -- [ ] list_stack_sets +- [X] list_stack_set_operation_results +- [X] list_stack_set_operations +- [X] list_stack_sets - [X] list_stacks +- [ ] package - [ ] set_stack_policy - [ ] signal_resource -- [ ] stop_stack_set_operation +- [X] stop_stack_set_operation - [X] update_stack -- [ ] update_stack_set +- [X] update_stack_instances +- [X] update_stack_set - [ ] 
update_termination_protection -- [ ] validate_template +- [X] validate_template +- [ ] wait ## cloudfront - 0% implemented - [ ] create_cloud_front_origin_access_identity - [ ] create_distribution - [ ] create_distribution_with_tags +- [ ] create_field_level_encryption_config +- [ ] create_field_level_encryption_profile - [ ] create_invalidation +- [ ] create_public_key - [ ] create_streaming_distribution - [ ] create_streaming_distribution_with_tags - [ ] delete_cloud_front_origin_access_identity - [ ] delete_distribution +- [ ] delete_field_level_encryption_config +- [ ] delete_field_level_encryption_profile +- [ ] delete_public_key - [ ] delete_service_linked_role - [ ] delete_streaming_distribution - [ ] get_cloud_front_origin_access_identity - [ ] get_cloud_front_origin_access_identity_config - [ ] get_distribution - [ ] get_distribution_config +- [ ] get_field_level_encryption +- [ ] get_field_level_encryption_config +- [ ] get_field_level_encryption_profile +- [ ] get_field_level_encryption_profile_config - [ ] get_invalidation +- [ ] get_public_key +- [ ] get_public_key_config - [ ] get_streaming_distribution - [ ] get_streaming_distribution_config - [ ] list_cloud_front_origin_access_identities - [ ] list_distributions - [ ] list_distributions_by_web_acl_id +- [ ] list_field_level_encryption_configs +- [ ] list_field_level_encryption_profiles - [ ] list_invalidations +- [ ] list_public_keys - [ ] list_streaming_distributions - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource - [ ] update_cloud_front_origin_access_identity - [ ] update_distribution +- [ ] update_field_level_encryption_config +- [ ] update_field_level_encryption_profile +- [ ] update_public_key - [ ] update_streaming_distribution ## cloudhsm - 0% implemented @@ -482,7 +649,7 @@ - [ ] stop_logging - [ ] update_trail -## cloudwatch - 53% implemented +## cloudwatch - 56% implemented - [X] delete_alarms - [X] delete_dashboards - [ ] describe_alarm_history @@ -491,7 +658,8 @@ - [ ] disable_alarm_actions - [ ] enable_alarm_actions - [X] get_dashboard -- [ ] get_metric_statistics +- [ ] get_metric_data +- [X] get_metric_statistics - [X] list_dashboards - [ ] list_metrics - [X] put_dashboard @@ -507,6 +675,7 @@ - [ ] create_webhook - [ ] delete_project - [ ] delete_webhook +- [ ] invalidate_project_cache - [ ] list_builds - [ ] list_builds_for_project - [ ] list_curated_environment_images @@ -514,24 +683,43 @@ - [ ] start_build - [ ] stop_build - [ ] update_project +- [ ] update_webhook ## codecommit - 0% implemented - [ ] batch_get_repositories - [ ] create_branch +- [ ] create_pull_request - [ ] create_repository - [ ] delete_branch +- [ ] delete_comment_content - [ ] delete_repository +- [ ] describe_pull_request_events - [ ] get_blob - [ ] get_branch +- [ ] get_comment +- [ ] get_comments_for_compared_commit +- [ ] get_comments_for_pull_request - [ ] get_commit - [ ] get_differences +- [ ] get_merge_conflicts +- [ ] get_pull_request - [ ] get_repository - [ ] get_repository_triggers - [ ] list_branches +- [ ] list_pull_requests - [ ] list_repositories +- [ ] merge_pull_request_by_fast_forward +- [ ] post_comment_for_compared_commit +- [ ] post_comment_for_pull_request +- [ ] post_comment_reply +- [ ] put_file - [ ] put_repository_triggers - [ ] test_repository_triggers +- [ ] update_comment - [ ] update_default_branch +- [ ] update_pull_request_description +- [ ] update_pull_request_status +- [ ] update_pull_request_title - [ ] update_repository_description - [ ] update_repository_name @@ -551,6 +739,7 
@@ - [ ] delete_application - [ ] delete_deployment_config - [ ] delete_deployment_group +- [ ] delete_git_hub_account_token - [ ] deregister_on_premises_instance - [ ] get_application - [ ] get_application_revision @@ -567,6 +756,7 @@ - [ ] list_deployments - [ ] list_git_hub_account_token_names - [ ] list_on_premises_instances +- [ ] put_lifecycle_event_hook_execution_status - [ ] register_application_revision - [ ] register_on_premises_instance - [ ] remove_tags_from_on_premises_instances @@ -625,16 +815,16 @@ - [ ] update_user_profile ## cognito-identity - 0% implemented -- [ ] create_identity_pool +- [X] create_identity_pool - [ ] delete_identities - [ ] delete_identity_pool - [ ] describe_identity - [ ] describe_identity_pool -- [ ] get_credentials_for_identity -- [ ] get_id +- [X] get_credentials_for_identity +- [X] get_id - [ ] get_identity_pool_roles -- [ ] get_open_id_token -- [ ] get_open_id_token_for_developer_identity +- [X] get_open_id_token +- [X] get_open_id_token_for_developer_identity - [ ] list_identities - [ ] list_identity_pools - [ ] lookup_developer_identity @@ -644,89 +834,102 @@ - [ ] unlink_identity - [ ] update_identity_pool -## cognito-idp - 0% implemented +## cognito-idp - 34% implemented - [ ] add_custom_attributes -- [ ] admin_add_user_to_group +- [X] admin_add_user_to_group - [ ] admin_confirm_sign_up -- [ ] admin_create_user -- [ ] admin_delete_user +- [X] admin_create_user +- [X] admin_delete_user - [ ] admin_delete_user_attributes - [ ] admin_disable_provider_for_user -- [ ] admin_disable_user -- [ ] admin_enable_user +- [X] admin_disable_user +- [X] admin_enable_user - [ ] admin_forget_device - [ ] admin_get_device -- [ ] admin_get_user -- [ ] admin_initiate_auth +- [X] admin_get_user +- [X] admin_initiate_auth - [ ] admin_link_provider_for_user - [ ] admin_list_devices -- [ ] admin_list_groups_for_user -- [ ] admin_remove_user_from_group +- [X] admin_list_groups_for_user +- [ ] admin_list_user_auth_events +- [X] admin_remove_user_from_group - [ ] admin_reset_user_password - [ ] admin_respond_to_auth_challenge +- [ ] admin_set_user_mfa_preference - [ ] admin_set_user_settings +- [ ] admin_update_auth_event_feedback - [ ] admin_update_device_status -- [ ] admin_update_user_attributes +- [X] admin_update_user_attributes - [ ] admin_user_global_sign_out -- [ ] change_password +- [ ] associate_software_token +- [X] change_password - [ ] confirm_device -- [ ] confirm_forgot_password +- [X] confirm_forgot_password - [ ] confirm_sign_up -- [ ] create_group -- [ ] create_identity_provider +- [X] create_group +- [X] create_identity_provider - [ ] create_resource_server - [ ] create_user_import_job -- [ ] create_user_pool -- [ ] create_user_pool_client -- [ ] create_user_pool_domain -- [ ] delete_group -- [ ] delete_identity_provider +- [X] create_user_pool +- [X] create_user_pool_client +- [X] create_user_pool_domain +- [X] delete_group +- [X] delete_identity_provider - [ ] delete_resource_server - [ ] delete_user - [ ] delete_user_attributes -- [ ] delete_user_pool -- [ ] delete_user_pool_client -- [ ] delete_user_pool_domain -- [ ] describe_identity_provider +- [X] delete_user_pool +- [X] delete_user_pool_client +- [X] delete_user_pool_domain +- [X] describe_identity_provider - [ ] describe_resource_server +- [ ] describe_risk_configuration - [ ] describe_user_import_job -- [ ] describe_user_pool -- [ ] describe_user_pool_client -- [ ] describe_user_pool_domain +- [X] describe_user_pool +- [X] describe_user_pool_client +- [X] describe_user_pool_domain - [ ] 
forget_device - [ ] forgot_password - [ ] get_csv_header - [ ] get_device -- [ ] get_group +- [X] get_group - [ ] get_identity_provider_by_identifier +- [ ] get_signing_certificate - [ ] get_ui_customization - [ ] get_user - [ ] get_user_attribute_verification_code +- [ ] get_user_pool_mfa_config - [ ] global_sign_out - [ ] initiate_auth - [ ] list_devices -- [ ] list_groups -- [ ] list_identity_providers +- [X] list_groups +- [X] list_identity_providers - [ ] list_resource_servers - [ ] list_user_import_jobs -- [ ] list_user_pool_clients -- [ ] list_user_pools -- [ ] list_users -- [ ] list_users_in_group +- [X] list_user_pool_clients +- [X] list_user_pools +- [X] list_users +- [X] list_users_in_group - [ ] resend_confirmation_code -- [ ] respond_to_auth_challenge +- [X] respond_to_auth_challenge +- [ ] set_risk_configuration - [ ] set_ui_customization +- [ ] set_user_mfa_preference +- [ ] set_user_pool_mfa_config - [ ] set_user_settings - [ ] sign_up - [ ] start_user_import_job - [ ] stop_user_import_job +- [ ] update_auth_event_feedback - [ ] update_device_status - [ ] update_group -- [ ] update_identity_provider +- [x] update_identity_provider - [ ] update_resource_server - [ ] update_user_attributes - [ ] update_user_pool -- [ ] update_user_pool_client +- [X] update_user_pool_client +- [X] update_user_pool_domain +- [ ] verify_software_token - [ ] verify_user_attribute ## cognito-sync - 0% implemented @@ -748,20 +951,44 @@ - [ ] unsubscribe_from_dataset - [ ] update_records +## comprehend - 0% implemented +- [ ] batch_detect_dominant_language +- [ ] batch_detect_entities +- [ ] batch_detect_key_phrases +- [ ] batch_detect_sentiment +- [ ] describe_topics_detection_job +- [ ] detect_dominant_language +- [ ] detect_entities +- [ ] detect_key_phrases +- [ ] detect_sentiment +- [ ] list_topics_detection_jobs +- [ ] start_topics_detection_job + ## config - 0% implemented +- [ ] batch_get_resource_config +- [ ] delete_aggregation_authorization - [ ] delete_config_rule +- [ ] delete_configuration_aggregator - [ ] delete_configuration_recorder - [ ] delete_delivery_channel - [ ] delete_evaluation_results +- [ ] delete_pending_aggregation_request - [ ] deliver_config_snapshot +- [ ] describe_aggregate_compliance_by_config_rules +- [ ] describe_aggregation_authorizations - [ ] describe_compliance_by_config_rule - [ ] describe_compliance_by_resource - [ ] describe_config_rule_evaluation_status - [ ] describe_config_rules +- [ ] describe_configuration_aggregator_sources_status +- [ ] describe_configuration_aggregators - [ ] describe_configuration_recorder_status - [ ] describe_configuration_recorders - [ ] describe_delivery_channel_status - [ ] describe_delivery_channels +- [ ] describe_pending_aggregation_requests +- [ ] get_aggregate_compliance_details_by_config_rule +- [ ] get_aggregate_config_rule_compliance_summary - [ ] get_compliance_details_by_config_rule - [ ] get_compliance_details_by_resource - [ ] get_compliance_summary_by_config_rule @@ -769,7 +996,9 @@ - [ ] get_discovered_resource_counts - [ ] get_resource_config_history - [ ] list_discovered_resources +- [ ] put_aggregation_authorization - [ ] put_config_rule +- [ ] put_configuration_aggregator - [ ] put_configuration_recorder - [ ] put_delivery_channel - [ ] put_evaluations @@ -777,6 +1006,10 @@ - [ ] start_configuration_recorder - [ ] stop_configuration_recorder +## connect - 0% implemented +- [ ] start_outbound_voice_contact +- [ ] stop_contact + ## cur - 0% implemented - [ ] delete_report_definition - [ ] 
describe_report_definitions @@ -828,11 +1061,13 @@ ## devicefarm - 0% implemented - [ ] create_device_pool +- [ ] create_instance_profile - [ ] create_network_profile - [ ] create_project - [ ] create_remote_access_session - [ ] create_upload - [ ] delete_device_pool +- [ ] delete_instance_profile - [ ] delete_network_profile - [ ] delete_project - [ ] delete_remote_access_session @@ -840,8 +1075,10 @@ - [ ] delete_upload - [ ] get_account_settings - [ ] get_device +- [ ] get_device_instance - [ ] get_device_pool - [ ] get_device_pool_compatibility +- [ ] get_instance_profile - [ ] get_job - [ ] get_network_profile - [ ] get_offering_status @@ -853,8 +1090,10 @@ - [ ] get_upload - [ ] install_to_remote_access_session - [ ] list_artifacts +- [ ] list_device_instances - [ ] list_device_pools - [ ] list_devices +- [ ] list_instance_profiles - [ ] list_jobs - [ ] list_network_profiles - [ ] list_offering_promotions @@ -873,7 +1112,9 @@ - [ ] schedule_run - [ ] stop_remote_access_session - [ ] stop_run +- [ ] update_device_instance - [ ] update_device_pool +- [ ] update_instance_profile - [ ] update_network_profile - [ ] update_project @@ -967,8 +1208,10 @@ - [ ] describe_events - [ ] describe_orderable_replication_instances - [ ] describe_refresh_schemas_status +- [ ] describe_replication_instance_task_logs - [ ] describe_replication_instances - [ ] describe_replication_subnet_groups +- [ ] describe_replication_task_assessment_results - [ ] describe_replication_tasks - [ ] describe_schemas - [ ] describe_table_statistics @@ -979,10 +1222,12 @@ - [ ] modify_replication_instance - [ ] modify_replication_subnet_group - [ ] modify_replication_task +- [ ] reboot_replication_instance - [ ] refresh_schemas - [ ] reload_tables - [ ] remove_tags_from_resource - [ ] start_replication_task +- [ ] start_replication_task_assessment - [ ] stop_replication_task - [ ] test_connection @@ -1028,23 +1273,35 @@ - [ ] update_radius - [ ] verify_trust -## dynamodb - 36% implemented +## dynamodb - 22% implemented - [ ] batch_get_item - [ ] batch_write_item +- [ ] create_backup +- [ ] create_global_table - [X] create_table +- [ ] delete_backup - [X] delete_item - [X] delete_table +- [ ] describe_backup +- [ ] describe_continuous_backups +- [ ] describe_global_table - [ ] describe_limits - [ ] describe_table - [ ] describe_time_to_live - [X] get_item +- [ ] list_backups +- [ ] list_global_tables - [ ] list_tables - [ ] list_tags_of_resource - [X] put_item - [X] query +- [ ] restore_table_from_backup +- [ ] restore_table_to_point_in_time - [X] scan - [ ] tag_resource - [ ] untag_resource +- [ ] update_continuous_backups +- [ ] update_global_table - [ ] update_item - [ ] update_table - [ ] update_time_to_live @@ -1055,8 +1312,9 @@ - [ ] get_shard_iterator - [ ] list_streams -## ec2 - 39% implemented +## ec2 - 37% implemented - [ ] accept_reserved_instances_exchange_quote +- [ ] accept_vpc_endpoint_connections - [X] accept_vpc_peering_connection - [X] allocate_address - [ ] allocate_hosts @@ -1067,7 +1325,7 @@ - [ ] associate_iam_instance_profile - [X] associate_route_table - [ ] associate_subnet_cidr_block -- [ ] associate_vpc_cidr_block +- [X] associate_vpc_cidr_block - [ ] attach_classic_link_vpc - [X] attach_internet_gateway - [X] attach_network_interface @@ -1086,7 +1344,7 @@ - [ ] confirm_product_instance - [ ] copy_fpga_image - [X] copy_image -- [ ] copy_snapshot +- [X] copy_snapshot - [X] create_customer_gateway - [ ] create_default_subnet - [ ] create_default_vpc @@ -1098,6 +1356,8 @@ - [ ] 
create_instance_export_task - [X] create_internet_gateway - [X] create_key_pair +- [ ] create_launch_template +- [ ] create_launch_template_version - [X] create_nat_gateway - [X] create_network_acl - [X] create_network_acl_entry @@ -1115,6 +1375,8 @@ - [X] create_volume - [X] create_vpc - [ ] create_vpc_endpoint +- [ ] create_vpc_endpoint_connection_notification +- [ ] create_vpc_endpoint_service_configuration - [X] create_vpc_peering_connection - [X] create_vpn_connection - [ ] create_vpn_connection_route @@ -1126,6 +1388,8 @@ - [ ] delete_fpga_image - [X] delete_internet_gateway - [X] delete_key_pair +- [ ] delete_launch_template +- [ ] delete_launch_template_versions - [X] delete_nat_gateway - [X] delete_network_acl - [X] delete_network_acl_entry @@ -1141,6 +1405,8 @@ - [X] delete_tags - [X] delete_volume - [X] delete_vpc +- [ ] delete_vpc_endpoint_connection_notifications +- [ ] delete_vpc_endpoint_service_configurations - [ ] delete_vpc_endpoints - [X] delete_vpc_peering_connection - [X] delete_vpn_connection @@ -1149,6 +1415,7 @@ - [X] deregister_image - [ ] describe_account_attributes - [X] describe_addresses +- [ ] describe_aggregate_id_format - [X] describe_availability_zones - [ ] describe_bundle_tasks - [ ] describe_classic_link_instances @@ -1172,10 +1439,13 @@ - [ ] describe_import_image_tasks - [ ] describe_import_snapshot_tasks - [X] describe_instance_attribute +- [ ] describe_instance_credit_specifications - [ ] describe_instance_status - [ ] describe_instances - [X] describe_internet_gateways - [X] describe_key_pairs +- [ ] describe_launch_template_versions +- [ ] describe_launch_templates - [ ] describe_moving_addresses - [ ] describe_nat_gateways - [ ] describe_network_acls @@ -1184,6 +1454,7 @@ - [X] describe_network_interfaces - [ ] describe_placement_groups - [ ] describe_prefix_lists +- [ ] describe_principal_id_format - [X] describe_regions - [ ] describe_reserved_instances - [ ] describe_reserved_instances_listings @@ -1203,7 +1474,7 @@ - [X] describe_spot_instance_requests - [ ] describe_spot_price_history - [ ] describe_stale_security_groups -- [ ] describe_subnets +- [X] describe_subnets - [X] describe_tags - [ ] describe_volume_attribute - [ ] describe_volume_status @@ -1212,6 +1483,10 @@ - [X] describe_vpc_attribute - [ ] describe_vpc_classic_link - [ ] describe_vpc_classic_link_dns_support +- [ ] describe_vpc_endpoint_connection_notifications +- [ ] describe_vpc_endpoint_connections +- [ ] describe_vpc_endpoint_service_configurations +- [ ] describe_vpc_endpoint_service_permissions - [ ] describe_vpc_endpoint_services - [ ] describe_vpc_endpoints - [ ] describe_vpc_peering_connections @@ -1230,7 +1505,7 @@ - [ ] disassociate_iam_instance_profile - [X] disassociate_route_table - [ ] disassociate_subnet_cidr_block -- [ ] disassociate_vpc_cidr_block +- [X] disassociate_vpc_cidr_block - [ ] enable_vgw_route_propagation - [ ] enable_volume_io - [ ] enable_vpc_classic_link @@ -1238,6 +1513,7 @@ - [ ] get_console_output - [ ] get_console_screenshot - [ ] get_host_reservation_purchase_preview +- [ ] get_launch_template_data - [ ] get_password_data - [ ] get_reserved_instances_exchange_quote - [ ] import_image @@ -1251,7 +1527,9 @@ - [ ] modify_identity_id_format - [ ] modify_image_attribute - [X] modify_instance_attribute +- [ ] modify_instance_credit_specification - [ ] modify_instance_placement +- [ ] modify_launch_template - [X] modify_network_interface_attribute - [ ] modify_reserved_instances - [ ] modify_snapshot_attribute @@ -1261,6 +1539,9 @@ - [ ] 
modify_volume_attribute - [X] modify_vpc_attribute - [ ] modify_vpc_endpoint +- [ ] modify_vpc_endpoint_connection_notification +- [ ] modify_vpc_endpoint_service_configuration +- [ ] modify_vpc_endpoint_service_permissions - [ ] modify_vpc_peering_connection_options - [ ] modify_vpc_tenancy - [ ] monitor_instances @@ -1270,6 +1551,7 @@ - [ ] purchase_scheduled_instances - [X] reboot_instances - [ ] register_image +- [ ] reject_vpc_endpoint_connections - [X] reject_vpc_peering_connection - [X] release_address - [ ] release_hosts @@ -1300,10 +1582,10 @@ - [ ] update_security_group_rule_descriptions_egress - [ ] update_security_group_rule_descriptions_ingress -## ecr - 27% implemented +## ecr - 36% implemented - [ ] batch_check_layer_availability -- [ ] batch_delete_image -- [ ] batch_get_image +- [X] batch_delete_image +- [X] batch_get_image - [ ] complete_layer_upload - [X] create_repository - [ ] delete_lifecycle_policy @@ -1428,6 +1710,7 @@ - [ ] delete_configuration_template - [ ] delete_environment_configuration - [ ] delete_platform_version +- [ ] describe_account_attributes - [ ] describe_application_versions - [ ] describe_applications - [ ] describe_configuration_options @@ -1609,12 +1892,25 @@ - [ ] create_delivery_stream - [ ] delete_delivery_stream - [ ] describe_delivery_stream -- [ ] get_kinesis_stream - [ ] list_delivery_streams - [ ] put_record - [ ] put_record_batch - [ ] update_destination +## fms - 0% implemented +- [ ] associate_admin_account +- [ ] delete_notification_channel +- [ ] delete_policy +- [ ] disassociate_admin_account +- [ ] get_admin_account +- [ ] get_compliance_detail +- [ ] get_notification_channel +- [ ] get_policy +- [ ] list_compliance_status +- [ ] list_policies +- [ ] put_notification_channel +- [ ] put_policy + ## gamelift - 0% implemented - [ ] accept_match - [ ] create_alias @@ -1667,6 +1963,7 @@ - [ ] resolve_alias - [ ] search_game_sessions - [ ] start_game_session_placement +- [ ] start_match_backfill - [ ] start_matchmaking - [ ] stop_game_session_placement - [ ] stop_matchmaking @@ -1716,22 +2013,23 @@ - [ ] upload_archive - [ ] upload_multipart_part -## glue - 0% implemented -- [ ] batch_create_partition +## glue - 23% implemented +- [x] batch_create_partition - [ ] batch_delete_connection -- [ ] batch_delete_partition -- [ ] batch_delete_table +- [x] batch_delete_partition +- [x] batch_delete_table +- [ ] batch_delete_table_version - [ ] batch_get_partition - [ ] batch_stop_job_run - [ ] create_classifier - [ ] create_connection - [ ] create_crawler -- [ ] create_database +- [x] create_database - [ ] create_dev_endpoint - [ ] create_job -- [ ] create_partition +- [x] create_partition - [ ] create_script -- [ ] create_table +- [x] create_table - [ ] create_trigger - [ ] create_user_defined_function - [ ] delete_classifier @@ -1740,8 +2038,9 @@ - [ ] delete_database - [ ] delete_dev_endpoint - [ ] delete_job -- [ ] delete_partition -- [ ] delete_table +- [x] delete_partition +- [x] delete_table +- [ ] delete_table_version - [ ] delete_trigger - [ ] delete_user_defined_function - [ ] get_catalog_import_status @@ -1752,7 +2051,7 @@ - [ ] get_crawler - [ ] get_crawler_metrics - [ ] get_crawlers -- [ ] get_database +- [x] get_database - [ ] get_databases - [ ] get_dataflow_graph - [ ] get_dev_endpoint @@ -1762,12 +2061,13 @@ - [ ] get_job_runs - [ ] get_jobs - [ ] get_mapping -- [ ] get_partition -- [ ] get_partitions +- [x] get_partition +- [x] get_partitions - [ ] get_plan -- [ ] get_table -- [ ] get_table_versions -- [ ] get_tables +- [x] 
get_table +- [x] get_table_version +- [x] get_table_versions +- [x] get_tables - [ ] get_trigger - [ ] get_triggers - [ ] get_user_defined_function @@ -1788,8 +2088,8 @@ - [ ] update_database - [ ] update_dev_endpoint - [ ] update_job -- [ ] update_partition -- [ ] update_table +- [x] update_partition +- [x] update_table - [ ] update_trigger - [ ] update_user_defined_function @@ -1808,6 +2108,9 @@ - [ ] create_group_version - [ ] create_logger_definition - [ ] create_logger_definition_version +- [ ] create_resource_definition +- [ ] create_resource_definition_version +- [ ] create_software_update_job - [ ] create_subscription_definition - [ ] create_subscription_definition_version - [ ] delete_core_definition @@ -1815,6 +2118,7 @@ - [ ] delete_function_definition - [ ] delete_group - [ ] delete_logger_definition +- [ ] delete_resource_definition - [ ] delete_subscription_definition - [ ] disassociate_role_from_group - [ ] disassociate_service_role_from_account @@ -1833,6 +2137,8 @@ - [ ] get_group_version - [ ] get_logger_definition - [ ] get_logger_definition_version +- [ ] get_resource_definition +- [ ] get_resource_definition_version - [ ] get_service_role_for_account - [ ] get_subscription_definition - [ ] get_subscription_definition_version @@ -1848,6 +2154,8 @@ - [ ] list_groups - [ ] list_logger_definition_versions - [ ] list_logger_definitions +- [ ] list_resource_definition_versions +- [ ] list_resource_definitions - [ ] list_subscription_definition_versions - [ ] list_subscription_definitions - [ ] reset_deployments @@ -1858,8 +2166,48 @@ - [ ] update_group - [ ] update_group_certificate_configuration - [ ] update_logger_definition +- [ ] update_resource_definition - [ ] update_subscription_definition +## guardduty - 0% implemented +- [ ] accept_invitation +- [ ] archive_findings +- [ ] create_detector +- [ ] create_ip_set +- [ ] create_members +- [ ] create_sample_findings +- [ ] create_threat_intel_set +- [ ] decline_invitations +- [ ] delete_detector +- [ ] delete_invitations +- [ ] delete_ip_set +- [ ] delete_members +- [ ] delete_threat_intel_set +- [ ] disassociate_from_master_account +- [ ] disassociate_members +- [ ] get_detector +- [ ] get_findings +- [ ] get_findings_statistics +- [ ] get_invitations_count +- [ ] get_ip_set +- [ ] get_master_account +- [ ] get_members +- [ ] get_threat_intel_set +- [ ] invite_members +- [ ] list_detectors +- [ ] list_findings +- [ ] list_invitations +- [ ] list_ip_sets +- [ ] list_members +- [ ] list_threat_intel_sets +- [ ] start_monitoring_members +- [ ] stop_monitoring_members +- [ ] unarchive_findings +- [ ] update_detector +- [ ] update_findings_feedback +- [ ] update_ip_set +- [ ] update_threat_intel_set + ## health - 0% implemented - [ ] describe_affected_entities - [ ] describe_entity_aggregates @@ -1868,7 +2216,7 @@ - [ ] describe_event_types - [ ] describe_events -## iam - 46% implemented +## iam - 62% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -1885,7 +2233,7 @@ - [X] create_policy - [X] create_policy_version - [X] create_role -- [ ] create_saml_provider +- [X] create_saml_provider - [ ] create_service_linked_role - [ ] create_service_specific_credential - [X] create_user @@ -1903,11 +2251,11 @@ - [X] delete_policy_version - [X] delete_role - [X] delete_role_policy -- [ ] delete_saml_provider -- [ ] delete_server_certificate +- [X] delete_saml_provider +- [X] delete_server_certificate - [ ] delete_service_linked_role - [ ] 
delete_service_specific_credential -- [ ] delete_signing_certificate +- [X] delete_signing_certificate - [ ] delete_ssh_public_key - [X] delete_user - [X] delete_user_policy @@ -1917,8 +2265,8 @@ - [X] detach_user_policy - [X] enable_mfa_device - [ ] generate_credential_report -- [ ] get_access_key_last_used -- [ ] get_account_authorization_details +- [X] get_access_key_last_used +- [X] get_account_authorization_details - [ ] get_account_password_policy - [ ] get_account_summary - [ ] get_context_keys_for_custom_policy @@ -1933,13 +2281,13 @@ - [X] get_policy_version - [X] get_role - [X] get_role_policy -- [ ] get_saml_provider +- [X] get_saml_provider - [X] get_server_certificate - [ ] get_service_linked_role_deletion_status - [ ] get_ssh_public_key - [X] get_user - [X] get_user_policy -- [ ] list_access_keys +- [X] list_access_keys - [X] list_account_aliases - [X] list_attached_group_policies - [X] list_attached_role_policies @@ -1947,19 +2295,21 @@ - [ ] list_entities_for_policy - [X] list_group_policies - [X] list_groups -- [ ] list_groups_for_user -- [ ] list_instance_profiles -- [ ] list_instance_profiles_for_role +- [X] list_groups_for_user +- [X] list_instance_profiles +- [X] list_instance_profiles_for_role - [X] list_mfa_devices - [ ] list_open_id_connect_providers - [X] list_policies - [X] list_policy_versions - [X] list_role_policies -- [ ] list_roles -- [ ] list_saml_providers -- [ ] list_server_certificates +- [X] list_roles +- [X] list_role_tags +- [ ] list_user_tags +- [X] list_saml_providers +- [X] list_server_certificates - [ ] list_service_specific_credentials -- [ ] list_signing_certificates +- [X] list_signing_certificates - [ ] list_ssh_public_keys - [X] list_user_policies - [X] list_users @@ -1975,21 +2325,26 @@ - [ ] set_default_policy_version - [ ] simulate_custom_policy - [ ] simulate_principal_policy -- [ ] update_access_key +- [X] tag_role +- [ ] tag_user +- [X] untag_role +- [ ] untag_user +- [X] update_access_key - [ ] update_account_password_policy - [ ] update_assume_role_policy - [ ] update_group - [X] update_login_profile - [ ] update_open_id_connect_provider_thumbprint +- [ ] update_role - [ ] update_role_description -- [ ] update_saml_provider +- [X] update_saml_provider - [ ] update_server_certificate - [ ] update_service_specific_credential -- [ ] update_signing_certificate +- [X] update_signing_certificate - [ ] update_ssh_public_key -- [ ] update_user -- [ ] upload_server_certificate -- [ ] upload_signing_certificate +- [X] update_user +- [X] upload_server_certificate +- [X] upload_signing_certificate - [ ] upload_ssh_public_key ## importexport - 0% implemented @@ -2035,64 +2390,130 @@ - [ ] unsubscribe_from_event - [ ] update_assessment_target -## iot - 45% implemented +## iot - 33% implemented - [ ] accept_certificate_transfer +- [X] add_thing_to_thing_group +- [ ] associate_targets_with_job +- [X] attach_policy - [X] attach_principal_policy - [X] attach_thing_principal - [ ] cancel_certificate_transfer +- [ ] cancel_job +- [ ] clear_default_authorizer +- [ ] create_authorizer - [ ] create_certificate_from_csr +- [X] create_job - [X] create_keys_and_certificate +- [ ] create_ota_update - [X] create_policy - [ ] create_policy_version +- [ ] create_role_alias +- [ ] create_stream - [X] create_thing +- [X] create_thing_group - [X] create_thing_type - [ ] create_topic_rule +- [ ] delete_authorizer - [ ] delete_ca_certificate - [X] delete_certificate +- [ ] delete_ota_update - [X] delete_policy - [ ] delete_policy_version - [ ] delete_registration_code 
+- [ ] delete_role_alias +- [ ] delete_stream - [X] delete_thing +- [X] delete_thing_group - [X] delete_thing_type - [ ] delete_topic_rule +- [ ] delete_v2_logging_level - [ ] deprecate_thing_type +- [ ] describe_authorizer - [ ] describe_ca_certificate - [X] describe_certificate +- [ ] describe_default_authorizer - [ ] describe_endpoint +- [ ] describe_event_configurations +- [ ] describe_index +- [X] describe_job +- [ ] describe_job_execution +- [ ] describe_role_alias +- [ ] describe_stream - [X] describe_thing +- [X] describe_thing_group +- [ ] describe_thing_registration_task - [X] describe_thing_type +- [X] detach_policy - [X] detach_principal_policy - [X] detach_thing_principal - [ ] disable_topic_rule - [ ] enable_topic_rule +- [ ] get_effective_policies +- [ ] get_indexing_configuration +- [ ] get_job_document - [ ] get_logging_options +- [ ] get_ota_update - [X] get_policy - [ ] get_policy_version - [ ] get_registration_code - [ ] get_topic_rule +- [ ] get_v2_logging_options +- [ ] list_attached_policies +- [ ] list_authorizers - [ ] list_ca_certificates - [X] list_certificates - [ ] list_certificates_by_ca +- [ ] list_indices +- [ ] list_job_executions_for_job +- [ ] list_job_executions_for_thing +- [ ] list_jobs +- [ ] list_ota_updates - [ ] list_outgoing_certificates - [X] list_policies - [X] list_policy_principals - [ ] list_policy_versions - [X] list_principal_policies - [X] list_principal_things +- [ ] list_role_aliases +- [ ] list_streams +- [ ] list_targets_for_policy +- [X] list_thing_groups +- [X] list_thing_groups_for_thing - [X] list_thing_principals +- [ ] list_thing_registration_task_reports +- [ ] list_thing_registration_tasks - [X] list_thing_types - [X] list_things +- [X] list_things_in_thing_group - [ ] list_topic_rules +- [ ] list_v2_logging_levels - [ ] register_ca_certificate -- [ ] register_certificate +- [X] register_certificate +- [ ] register_thing - [ ] reject_certificate_transfer +- [X] remove_thing_from_thing_group - [ ] replace_topic_rule +- [ ] search_index +- [ ] set_default_authorizer - [ ] set_default_policy_version - [ ] set_logging_options +- [ ] set_v2_logging_level +- [ ] set_v2_logging_options +- [ ] start_thing_registration_task +- [ ] stop_thing_registration_task +- [ ] test_authorization +- [ ] test_invoke_authorizer - [ ] transfer_certificate +- [ ] update_authorizer - [ ] update_ca_certificate - [X] update_certificate +- [ ] update_event_configurations +- [ ] update_indexing_configuration +- [ ] update_role_alias +- [ ] update_stream - [X] update_thing +- [X] update_thing_group +- [X] update_thing_groups_for_thing ## iot-data - 0% implemented - [ ] delete_thing_shadow @@ -2100,6 +2521,12 @@ - [ ] publish - [ ] update_thing_shadow +## iot-jobs-data - 0% implemented +- [ ] describe_job_execution +- [ ] get_pending_job_executions +- [ ] start_next_pending_job_execution +- [ ] update_job_execution + ## kinesis - 61% implemented - [X] add_tags_to_stream - [X] create_stream @@ -2107,11 +2534,13 @@ - [X] delete_stream - [ ] describe_limits - [X] describe_stream +- [X] describe_stream_summary - [ ] disable_enhanced_monitoring - [ ] enable_enhanced_monitoring - [X] get_records - [X] get_shard_iterator - [ ] increase_stream_retention_period +- [ ] list_shards - [X] list_streams - [X] list_tags_for_stream - [X] merge_shards @@ -2123,6 +2552,13 @@ - [ ] stop_stream_encryption - [ ] update_shard_count +## kinesis-video-archived-media - 0% implemented +- [ ] get_media_for_fragment_list +- [ ] list_fragments + +## kinesis-video-media - 0% implemented 
+- [ ] get_media + ## kinesisanalytics - 0% implemented - [ ] add_application_cloud_watch_logging_option - [ ] add_application_input @@ -2142,6 +2578,18 @@ - [ ] stop_application - [ ] update_application +## kinesisvideo - 0% implemented +- [ ] create_stream +- [ ] delete_stream +- [ ] describe_stream +- [ ] get_data_endpoint +- [ ] list_streams +- [ ] list_tags_for_stream +- [ ] tag_stream +- [ ] untag_stream +- [ ] update_data_retention +- [ ] update_stream + ## kms - 25% implemented - [ ] cancel_key_deletion - [ ] create_alias @@ -2187,6 +2635,7 @@ - [ ] delete_alias - [ ] delete_event_source_mapping - [ ] delete_function +- [ ] delete_function_concurrency - [ ] get_account_settings - [ ] get_alias - [ ] get_event_source_mapping @@ -2201,6 +2650,7 @@ - [ ] list_tags - [ ] list_versions_by_function - [ ] publish_version +- [ ] put_function_concurrency - [ ] remove_permission - [ ] tag_resource - [ ] untag_resource @@ -2233,6 +2683,7 @@ - [ ] get_builtin_intents - [ ] get_builtin_slot_types - [ ] get_export +- [ ] get_import - [ ] get_intent - [ ] get_intent_versions - [ ] get_intents @@ -2244,6 +2695,7 @@ - [ ] put_bot_alias - [ ] put_intent - [ ] put_slot_type +- [ ] start_import ## lex-runtime - 0% implemented - [ ] post_content @@ -2251,24 +2703,42 @@ ## lightsail - 0% implemented - [ ] allocate_static_ip +- [ ] attach_disk +- [ ] attach_instances_to_load_balancer +- [ ] attach_load_balancer_tls_certificate - [ ] attach_static_ip - [ ] close_instance_public_ports +- [ ] create_disk +- [ ] create_disk_from_snapshot +- [ ] create_disk_snapshot - [ ] create_domain - [ ] create_domain_entry - [ ] create_instance_snapshot - [ ] create_instances - [ ] create_instances_from_snapshot - [ ] create_key_pair +- [ ] create_load_balancer +- [ ] create_load_balancer_tls_certificate +- [ ] delete_disk +- [ ] delete_disk_snapshot - [ ] delete_domain - [ ] delete_domain_entry - [ ] delete_instance - [ ] delete_instance_snapshot - [ ] delete_key_pair +- [ ] delete_load_balancer +- [ ] delete_load_balancer_tls_certificate +- [ ] detach_disk +- [ ] detach_instances_from_load_balancer - [ ] detach_static_ip - [ ] download_default_key_pair - [ ] get_active_names - [ ] get_blueprints - [ ] get_bundles +- [ ] get_disk +- [ ] get_disk_snapshot +- [ ] get_disk_snapshots +- [ ] get_disks - [ ] get_domain - [ ] get_domains - [ ] get_instance @@ -2281,6 +2751,10 @@ - [ ] get_instances - [ ] get_key_pair - [ ] get_key_pairs +- [ ] get_load_balancer +- [ ] get_load_balancer_metric_data +- [ ] get_load_balancer_tls_certificates +- [ ] get_load_balancers - [ ] get_operation - [ ] get_operations - [ ] get_operations_for_resource @@ -2298,8 +2772,9 @@ - [ ] stop_instance - [ ] unpeer_vpc - [ ] update_domain_entry +- [ ] update_load_balancer_attribute -## logs - 24% implemented +## logs - 27% implemented - [ ] associate_kms_key - [ ] cancel_export_task - [ ] create_export_task @@ -2314,7 +2789,7 @@ - [ ] delete_subscription_filter - [ ] describe_destinations - [ ] describe_export_tasks -- [ ] describe_log_groups +- [X] describe_log_groups - [X] describe_log_streams - [ ] describe_metric_filters - [ ] describe_resource_policies @@ -2371,6 +2846,79 @@ - [ ] generate_data_set - [ ] start_support_data_export +## mediaconvert - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_job_template +- [ ] create_preset +- [ ] create_queue +- [ ] delete_job_template +- [ ] delete_preset +- [ ] delete_queue +- [ ] describe_endpoints +- [ ] get_job +- [ ] get_job_template +- [ ] get_preset +- [ ] get_queue +- [ ] 
list_job_templates +- [ ] list_jobs +- [ ] list_presets +- [ ] list_queues +- [ ] update_job_template +- [ ] update_preset +- [ ] update_queue + +## medialive - 0% implemented +- [ ] create_channel +- [ ] create_input +- [ ] create_input_security_group +- [ ] delete_channel +- [ ] delete_input +- [ ] delete_input_security_group +- [ ] describe_channel +- [ ] describe_input +- [ ] describe_input_security_group +- [ ] list_channels +- [ ] list_input_security_groups +- [ ] list_inputs +- [ ] start_channel +- [ ] stop_channel +- [ ] update_channel +- [ ] update_input +- [ ] update_input_security_group + +## mediapackage - 0% implemented +- [ ] create_channel +- [ ] create_origin_endpoint +- [ ] delete_channel +- [ ] delete_origin_endpoint +- [ ] describe_channel +- [ ] describe_origin_endpoint +- [ ] list_channels +- [ ] list_origin_endpoints +- [ ] rotate_channel_credentials +- [ ] update_channel +- [ ] update_origin_endpoint + +## mediastore - 0% implemented +- [ ] create_container +- [ ] delete_container +- [ ] delete_container_policy +- [ ] delete_cors_policy +- [ ] describe_container +- [ ] get_container_policy +- [ ] get_cors_policy +- [ ] list_containers +- [ ] put_container_policy +- [ ] put_cors_policy + +## mediastore-data - 0% implemented +- [ ] delete_object +- [ ] describe_object +- [ ] get_object +- [ ] list_items +- [ ] put_object + ## meteringmarketplace - 0% implemented - [ ] batch_meter_usage - [ ] meter_usage @@ -2405,6 +2953,25 @@ - [ ] list_projects - [ ] update_project +## mq - 0% implemented +- [ ] create_broker +- [ ] create_configuration +- [ ] create_user +- [ ] delete_broker +- [ ] delete_user +- [ ] describe_broker +- [ ] describe_configuration +- [ ] describe_configuration_revision +- [ ] describe_user +- [ ] list_brokers +- [ ] list_configuration_revisions +- [ ] list_configurations +- [ ] list_users +- [ ] reboot_broker +- [ ] update_broker +- [ ] update_configuration +- [ ] update_user + ## mturk - 0% implemented - [ ] accept_qualification_request - [ ] approve_assignment @@ -2446,13 +3013,13 @@ - [ ] update_notification_settings - [ ] update_qualification_type -## opsworks - 9% implemented +## opsworks - 12% implemented - [ ] assign_instance - [ ] assign_volume - [ ] associate_elastic_ip - [ ] attach_elastic_load_balancer - [ ] clone_stack -- [ ] create_app +- [X] create_app - [ ] create_deployment - [X] create_instance - [X] create_layer @@ -2469,7 +3036,7 @@ - [ ] deregister_rds_db_instance - [ ] deregister_volume - [ ] describe_agent_versions -- [ ] describe_apps +- [X] describe_apps - [ ] describe_commands - [ ] describe_deployments - [ ] describe_ecs_clusters @@ -2479,6 +3046,7 @@ - [X] describe_layers - [ ] describe_load_based_auto_scaling - [ ] describe_my_user_profile +- [ ] describe_operating_systems - [ ] describe_permissions - [ ] describe_raid_arrays - [ ] describe_rds_db_instances @@ -2538,43 +3106,46 @@ - [ ] update_server - [ ] update_server_engine_attributes -## organizations - 0% implemented +## organizations - 47% implemented - [ ] accept_handshake -- [ ] attach_policy +- [X] attach_policy - [ ] cancel_handshake -- [ ] create_account -- [ ] create_organization -- [ ] create_organizational_unit -- [ ] create_policy +- [X] create_account +- [X] create_organization +- [X] create_organizational_unit +- [X] create_policy - [ ] decline_handshake - [ ] delete_organization - [ ] delete_organizational_unit - [ ] delete_policy -- [ ] describe_account +- [X] describe_account - [ ] describe_create_account_status - [ ] describe_handshake -- [ ] 
describe_organization -- [ ] describe_organizational_unit -- [ ] describe_policy +- [X] describe_organization +- [X] describe_organizational_unit +- [X] describe_policy - [ ] detach_policy +- [ ] disable_aws_service_access - [ ] disable_policy_type - [ ] enable_all_features +- [ ] enable_aws_service_access - [ ] enable_policy_type - [ ] invite_account_to_organization - [ ] leave_organization -- [ ] list_accounts -- [ ] list_accounts_for_parent -- [ ] list_children +- [X] list_accounts +- [X] list_accounts_for_parent +- [ ] list_aws_service_access_for_organization +- [X] list_children - [ ] list_create_account_status - [ ] list_handshakes_for_account - [ ] list_handshakes_for_organization -- [ ] list_organizational_units_for_parent -- [ ] list_parents -- [ ] list_policies -- [ ] list_policies_for_target -- [ ] list_roots -- [ ] list_targets_for_policy -- [ ] move_account +- [X] list_organizational_units_for_parent +- [X] list_parents +- [X] list_policies +- [X] list_policies_for_target +- [X] list_roots +- [X] list_targets_for_policy +- [X] move_account - [ ] remove_account_from_organization - [ ] update_organizational_unit - [ ] update_policy @@ -2582,6 +3153,7 @@ ## pinpoint - 0% implemented - [ ] create_app - [ ] create_campaign +- [ ] create_export_job - [ ] create_import_job - [ ] create_segment - [ ] delete_adm_channel @@ -2593,6 +3165,7 @@ - [ ] delete_baidu_channel - [ ] delete_campaign - [ ] delete_email_channel +- [ ] delete_endpoint - [ ] delete_event_stream - [ ] delete_gcm_channel - [ ] delete_segment @@ -2614,10 +3187,13 @@ - [ ] get_email_channel - [ ] get_endpoint - [ ] get_event_stream +- [ ] get_export_job +- [ ] get_export_jobs - [ ] get_gcm_channel - [ ] get_import_job - [ ] get_import_jobs - [ ] get_segment +- [ ] get_segment_export_jobs - [ ] get_segment_import_jobs - [ ] get_segment_version - [ ] get_segment_versions @@ -2741,12 +3317,13 @@ - [ ] restore_db_cluster_from_snapshot - [ ] restore_db_cluster_to_point_in_time - [ ] restore_db_instance_from_db_snapshot +- [ ] restore_db_instance_from_s3 - [ ] restore_db_instance_to_point_in_time - [ ] revoke_db_security_group_ingress - [ ] start_db_instance - [ ] stop_db_instance -## redshift - 31% implemented +## redshift - 41% implemented - [ ] authorize_cluster_security_group_ingress - [ ] authorize_snapshot_access - [ ] copy_cluster_snapshot @@ -2758,7 +3335,7 @@ - [ ] create_event_subscription - [ ] create_hsm_client_certificate - [ ] create_hsm_configuration -- [ ] create_snapshot_copy_grant +- [X] create_snapshot_copy_grant - [X] create_tags - [X] delete_cluster - [X] delete_cluster_parameter_group @@ -2768,7 +3345,7 @@ - [ ] delete_event_subscription - [ ] delete_hsm_client_certificate - [ ] delete_hsm_configuration -- [ ] delete_snapshot_copy_grant +- [X] delete_snapshot_copy_grant - [X] delete_tags - [X] describe_cluster_parameter_groups - [ ] describe_cluster_parameters @@ -2788,20 +3365,20 @@ - [ ] describe_reserved_node_offerings - [ ] describe_reserved_nodes - [ ] describe_resize -- [ ] describe_snapshot_copy_grants +- [X] describe_snapshot_copy_grants - [ ] describe_table_restore_status - [X] describe_tags - [ ] disable_logging -- [ ] disable_snapshot_copy +- [X] disable_snapshot_copy - [ ] enable_logging -- [ ] enable_snapshot_copy +- [X] enable_snapshot_copy - [ ] get_cluster_credentials - [X] modify_cluster - [ ] modify_cluster_iam_roles - [ ] modify_cluster_parameter_group - [ ] modify_cluster_subnet_group - [ ] modify_event_subscription -- [ ] modify_snapshot_copy_retention_period +- [X] 
modify_snapshot_copy_retention_period - [ ] purchase_reserved_node_offering - [ ] reboot_cluster - [ ] reset_cluster_parameter_group @@ -2814,27 +3391,60 @@ ## rekognition - 0% implemented - [ ] compare_faces - [ ] create_collection +- [ ] create_stream_processor - [ ] delete_collection - [ ] delete_faces +- [ ] delete_stream_processor +- [ ] describe_stream_processor - [ ] detect_faces - [ ] detect_labels - [ ] detect_moderation_labels +- [ ] detect_text - [ ] get_celebrity_info +- [ ] get_celebrity_recognition +- [ ] get_content_moderation +- [ ] get_face_detection +- [ ] get_face_search +- [ ] get_label_detection +- [ ] get_person_tracking - [ ] index_faces - [ ] list_collections - [ ] list_faces +- [ ] list_stream_processors - [ ] recognize_celebrities - [ ] search_faces - [ ] search_faces_by_image +- [ ] start_celebrity_recognition +- [ ] start_content_moderation +- [ ] start_face_detection +- [ ] start_face_search +- [ ] start_label_detection +- [ ] start_person_tracking +- [ ] start_stream_processor +- [ ] stop_stream_processor -## resourcegroupstaggingapi - 0% implemented -- [ ] get_resources -- [ ] get_tag_keys -- [ ] get_tag_values +## resource-groups - 62% implemented +- [X] create_group +- [X] delete_group +- [X] get_group +- [X] get_group_query +- [ ] get_tags +- [ ] list_group_resources +- [X] list_groups +- [ ] search_resources +- [ ] tag +- [ ] untag +- [X] update_group +- [X] update_group_query + +## resourcegroupstaggingapi - 60% implemented +- [X] get_resources +- [X] get_tag_keys +- [X] get_tag_values - [ ] tag_resources - [ ] untag_resources -## route53 - 13% implemented +## route53 - 12% implemented - [ ] associate_vpc_with_hosted_zone - [ ] change_resource_record_sets - [X] change_tags_for_resource @@ -2854,6 +3464,7 @@ - [ ] delete_traffic_policy_instance - [ ] delete_vpc_association_authorization - [ ] disassociate_vpc_from_hosted_zone +- [ ] get_account_limit - [ ] get_change - [ ] get_checker_ip_ranges - [ ] get_geo_location @@ -2863,8 +3474,10 @@ - [ ] get_health_check_status - [X] get_hosted_zone - [ ] get_hosted_zone_count +- [ ] get_hosted_zone_limit - [ ] get_query_logging_config - [ ] get_reusable_delegation_set +- [ ] get_reusable_delegation_set_limit - [ ] get_traffic_policy - [ ] get_traffic_policy_instance - [ ] get_traffic_policy_instance_count @@ -2915,7 +3528,7 @@ - [ ] update_tags_for_domain - [ ] view_billing -## s3 - 12% implemented +## s3 - 15% implemented - [ ] abort_multipart_upload - [ ] complete_multipart_upload - [ ] copy_object @@ -2935,7 +3548,7 @@ - [ ] delete_object - [ ] delete_object_tagging - [ ] delete_objects -- [ ] get_bucket_accelerate_configuration +- [X] get_bucket_accelerate_configuration - [X] get_bucket_acl - [ ] get_bucket_analytics_configuration - [ ] get_bucket_cors @@ -2943,7 +3556,7 @@ - [ ] get_bucket_inventory_configuration - [ ] get_bucket_lifecycle - [ ] get_bucket_lifecycle_configuration -- [ ] get_bucket_location +- [X] get_bucket_location - [ ] get_bucket_logging - [ ] get_bucket_metrics_configuration - [ ] get_bucket_notification @@ -2969,7 +3582,7 @@ - [ ] list_objects - [ ] list_objects_v2 - [ ] list_parts -- [ ] put_bucket_accelerate_configuration +- [X] put_bucket_accelerate_configuration - [ ] put_bucket_acl - [ ] put_bucket_analytics_configuration - [X] put_bucket_cors @@ -2977,10 +3590,10 @@ - [ ] put_bucket_inventory_configuration - [ ] put_bucket_lifecycle - [ ] put_bucket_lifecycle_configuration -- [ ] put_bucket_logging +- [X] put_bucket_logging - [ ] put_bucket_metrics_configuration - [ ] 
put_bucket_notification -- [ ] put_bucket_notification_configuration +- [X] put_bucket_notification_configuration - [ ] put_bucket_policy - [ ] put_bucket_replication - [ ] put_bucket_request_payment @@ -2991,9 +3604,49 @@ - [ ] put_object_acl - [ ] put_object_tagging - [ ] restore_object +- [ ] select_object_content - [ ] upload_part - [ ] upload_part_copy +## sagemaker - 0% implemented +- [ ] add_tags +- [ ] create_endpoint +- [ ] create_endpoint_config +- [ ] create_model +- [ ] create_notebook_instance +- [ ] create_notebook_instance_lifecycle_config +- [ ] create_presigned_notebook_instance_url +- [ ] create_training_job +- [ ] delete_endpoint +- [ ] delete_endpoint_config +- [ ] delete_model +- [ ] delete_notebook_instance +- [ ] delete_notebook_instance_lifecycle_config +- [ ] delete_tags +- [ ] describe_endpoint +- [ ] describe_endpoint_config +- [ ] describe_model +- [ ] describe_notebook_instance +- [ ] describe_notebook_instance_lifecycle_config +- [ ] describe_training_job +- [ ] list_endpoint_configs +- [ ] list_endpoints +- [ ] list_models +- [ ] list_notebook_instance_lifecycle_configs +- [ ] list_notebook_instances +- [ ] list_tags +- [ ] list_training_jobs +- [ ] start_notebook_instance +- [ ] stop_notebook_instance +- [ ] stop_training_job +- [ ] update_endpoint +- [ ] update_endpoint_weights_and_capacities +- [ ] update_notebook_instance +- [ ] update_notebook_instance_lifecycle_config + +## sagemaker-runtime - 0% implemented +- [ ] invoke_endpoint + ## sdb - 0% implemented - [ ] batch_delete_attributes - [ ] batch_put_attributes @@ -3006,6 +3659,35 @@ - [ ] put_attributes - [ ] select +## secretsmanager - 33% implemented +- [ ] cancel_rotate_secret +- [X] create_secret +- [X] delete_secret +- [X] describe_secret +- [X] get_random_password +- [X] get_secret_value +- [X] list_secret_version_ids +- [X] list_secrets +- [X] put_secret_value +- [X] restore_secret +- [X] rotate_secret +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_secret +- [ ] update_secret_version_stage + +## serverlessrepo - 0% implemented +- [ ] create_application +- [ ] create_application_version +- [ ] create_cloud_formation_change_set +- [ ] delete_application +- [ ] get_application +- [ ] get_application_policy +- [ ] list_application_versions +- [ ] list_applications +- [ ] put_application_policy +- [ ] update_application + ## servicecatalog - 0% implemented - [ ] accept_portfolio_share - [ ] associate_principal_with_portfolio @@ -3016,13 +3698,16 @@ - [ ] create_portfolio - [ ] create_portfolio_share - [ ] create_product +- [ ] create_provisioned_product_plan - [ ] create_provisioning_artifact - [ ] create_tag_option - [ ] delete_constraint - [ ] delete_portfolio - [ ] delete_portfolio_share - [ ] delete_product +- [ ] delete_provisioned_product_plan - [ ] delete_provisioning_artifact +- [ ] delete_tag_option - [ ] describe_constraint - [ ] describe_copy_product_status - [ ] describe_portfolio @@ -3030,6 +3715,7 @@ - [ ] describe_product_as_admin - [ ] describe_product_view - [ ] describe_provisioned_product +- [ ] describe_provisioned_product_plan - [ ] describe_provisioning_artifact - [ ] describe_provisioning_parameters - [ ] describe_record @@ -3037,6 +3723,7 @@ - [ ] disassociate_principal_from_portfolio - [ ] disassociate_product_from_portfolio - [ ] disassociate_tag_option_from_resource +- [ ] execute_provisioned_product_plan - [ ] list_accepted_portfolio_shares - [ ] list_constraints_for_portfolio - [ ] list_launch_paths @@ -3044,6 +3731,7 @@ - [ ] list_portfolios - [ ] 
list_portfolios_for_product - [ ] list_principals_for_portfolio +- [ ] list_provisioned_product_plans - [ ] list_provisioning_artifacts - [ ] list_record_history - [ ] list_resources_for_tag_option @@ -3053,6 +3741,7 @@ - [ ] scan_provisioned_products - [ ] search_products - [ ] search_products_as_admin +- [ ] search_provisioned_products - [ ] terminate_provisioned_product - [ ] update_constraint - [ ] update_portfolio @@ -3061,11 +3750,32 @@ - [ ] update_provisioning_artifact - [ ] update_tag_option -## ses - 13% implemented +## servicediscovery - 0% implemented +- [ ] create_private_dns_namespace +- [ ] create_public_dns_namespace +- [ ] create_service +- [ ] delete_namespace +- [ ] delete_service +- [ ] deregister_instance +- [ ] get_instance +- [ ] get_instances_health_status +- [ ] get_namespace +- [ ] get_operation +- [ ] get_service +- [ ] list_instances +- [ ] list_namespaces +- [ ] list_operations +- [ ] list_services +- [ ] register_instance +- [ ] update_instance_custom_health_status +- [ ] update_service + +## ses - 11% implemented - [ ] clone_receipt_rule_set - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] create_configuration_set_tracking_options +- [ ] create_custom_verification_email_template - [ ] create_receipt_filter - [ ] create_receipt_rule - [ ] create_receipt_rule_set @@ -3073,6 +3783,7 @@ - [ ] delete_configuration_set - [ ] delete_configuration_set_event_destination - [ ] delete_configuration_set_tracking_options +- [ ] delete_custom_verification_email_template - [X] delete_identity - [ ] delete_identity_policy - [ ] delete_receipt_filter @@ -3084,6 +3795,8 @@ - [ ] describe_configuration_set - [ ] describe_receipt_rule - [ ] describe_receipt_rule_set +- [ ] get_account_sending_enabled +- [ ] get_custom_verification_email_template - [ ] get_identity_dkim_attributes - [ ] get_identity_mail_from_domain_attributes - [ ] get_identity_notification_attributes @@ -3093,6 +3806,7 @@ - [ ] get_send_statistics - [ ] get_template - [ ] list_configuration_sets +- [ ] list_custom_verification_email_templates - [X] list_identities - [ ] list_identity_policies - [ ] list_receipt_filters @@ -3103,6 +3817,7 @@ - [ ] reorder_receipt_rule_set - [ ] send_bounce - [ ] send_bulk_templated_email +- [ ] send_custom_verification_email - [X] send_email - [X] send_raw_email - [ ] send_templated_email @@ -3114,8 +3829,12 @@ - [ ] set_identity_notification_topic - [ ] set_receipt_rule_position - [ ] test_render_template +- [ ] update_account_sending_enabled - [ ] update_configuration_set_event_destination +- [ ] update_configuration_set_reputation_metrics_enabled +- [ ] update_configuration_set_sending_enabled - [ ] update_configuration_set_tracking_options +- [ ] update_custom_verification_email_template - [ ] update_receipt_rule - [ ] update_template - [ ] verify_domain_dkim @@ -3131,6 +3850,7 @@ - [ ] describe_attack - [ ] describe_protection - [ ] describe_subscription +- [ ] get_subscription_state - [ ] list_attacks - [ ] list_protections @@ -3198,7 +3918,7 @@ - [X] subscribe - [X] unsubscribe -## sqs - 60% implemented +## sqs - 65% implemented - [X] add_permission - [X] change_message_visibility - [ ] change_message_visibility_batch @@ -3207,7 +3927,7 @@ - [ ] delete_message_batch - [X] delete_queue - [ ] get_queue_attributes -- [ ] get_queue_url +- [X] get_queue_url - [X] list_dead_letter_source_queues - [ ] list_queue_tags - [X] list_queues @@ -3216,11 +3936,11 @@ - [X] remove_permission - [X] send_message - [ ] send_message_batch -- [ ] 
set_queue_attributes +- [X] set_queue_attributes - [X] tag_queue - [X] untag_queue -## ssm - 9% implemented +## ssm - 11% implemented - [X] add_tags_to_resource - [ ] cancel_command - [ ] create_activation @@ -3245,6 +3965,7 @@ - [ ] describe_activations - [ ] describe_association - [ ] describe_automation_executions +- [ ] describe_automation_step_executions - [ ] describe_available_patches - [ ] describe_document - [ ] describe_document_permission @@ -3286,7 +4007,7 @@ - [ ] list_association_versions - [ ] list_associations - [ ] list_command_invocations -- [ ] list_commands +- [X] list_commands - [ ] list_compliance_items - [ ] list_compliance_summaries - [ ] list_document_versions @@ -3305,7 +4026,7 @@ - [ ] register_task_with_maintenance_window - [X] remove_tags_from_resource - [ ] send_automation_signal -- [ ] send_command +- [X] send_command - [ ] start_automation_execution - [ ] stop_automation_execution - [ ] update_association @@ -3326,6 +4047,7 @@ - [ ] describe_activity - [ ] describe_execution - [ ] describe_state_machine +- [ ] describe_state_machine_for_execution - [ ] get_activity_task - [ ] get_execution_history - [ ] list_activities @@ -3336,6 +4058,7 @@ - [ ] send_task_success - [ ] start_execution - [ ] stop_execution +- [ ] update_state_machine ## storagegateway - 0% implemented - [ ] activate_gateway @@ -3384,6 +4107,7 @@ - [ ] list_volume_initiators - [ ] list_volume_recovery_points - [ ] list_volumes +- [ ] notify_when_uploaded - [ ] refresh_cache - [ ] remove_tags_from_resource - [ ] reset_cache @@ -3404,7 +4128,7 @@ ## sts - 42% implemented - [X] assume_role - [ ] assume_role_with_saml -- [ ] assume_role_with_web_identity +- [X] assume_role_with_web_identity - [ ] decode_authorization_message - [ ] get_caller_identity - [X] get_federation_token @@ -3426,7 +4150,7 @@ - [ ] refresh_trusted_advisor_check - [ ] resolve_case -## swf - 54% implemented +## swf - 58% implemented - [ ] count_closed_workflow_executions - [ ] count_open_workflow_executions - [X] count_pending_activity_tasks @@ -3455,10 +4179,23 @@ - [X] respond_activity_task_completed - [X] respond_activity_task_failed - [X] respond_decision_task_completed -- [ ] signal_workflow_execution +- [X] signal_workflow_execution - [X] start_workflow_execution - [X] terminate_workflow_execution +## transcribe - 0% implemented +- [ ] create_vocabulary +- [ ] delete_vocabulary +- [ ] get_transcription_job +- [ ] get_vocabulary +- [ ] list_transcription_jobs +- [ ] list_vocabularies +- [ ] start_transcription_job +- [ ] update_vocabulary + +## translate - 0% implemented +- [ ] translate_text + ## waf - 0% implemented - [ ] create_byte_match_set - [ ] create_geo_match_set @@ -3467,6 +4204,7 @@ - [ ] create_regex_match_set - [ ] create_regex_pattern_set - [ ] create_rule +- [ ] create_rule_group - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl @@ -3474,10 +4212,12 @@ - [ ] delete_byte_match_set - [ ] delete_geo_match_set - [ ] delete_ip_set +- [ ] delete_permission_policy - [ ] delete_rate_based_rule - [ ] delete_regex_match_set - [ ] delete_regex_pattern_set - [ ] delete_rule +- [ ] delete_rule_group - [ ] delete_size_constraint_set - [ ] delete_sql_injection_match_set - [ ] delete_web_acl @@ -3487,27 +4227,33 @@ - [ ] get_change_token_status - [ ] get_geo_match_set - [ ] get_ip_set +- [ ] get_permission_policy - [ ] get_rate_based_rule - [ ] get_rate_based_rule_managed_keys - [ ] get_regex_match_set - [ ] get_regex_pattern_set - [ ] get_rule +- [ ] get_rule_group - [ ] 
get_sampled_requests - [ ] get_size_constraint_set - [ ] get_sql_injection_match_set - [ ] get_web_acl - [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group - [ ] list_byte_match_sets - [ ] list_geo_match_sets - [ ] list_ip_sets - [ ] list_rate_based_rules - [ ] list_regex_match_sets - [ ] list_regex_pattern_sets +- [ ] list_rule_groups - [ ] list_rules - [ ] list_size_constraint_sets - [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets +- [ ] put_permission_policy - [ ] update_byte_match_set - [ ] update_geo_match_set - [ ] update_ip_set @@ -3515,6 +4261,7 @@ - [ ] update_regex_match_set - [ ] update_regex_pattern_set - [ ] update_rule +- [ ] update_rule_group - [ ] update_size_constraint_set - [ ] update_sql_injection_match_set - [ ] update_web_acl @@ -3529,6 +4276,7 @@ - [ ] create_regex_match_set - [ ] create_regex_pattern_set - [ ] create_rule +- [ ] create_rule_group - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl @@ -3536,10 +4284,12 @@ - [ ] delete_byte_match_set - [ ] delete_geo_match_set - [ ] delete_ip_set +- [ ] delete_permission_policy - [ ] delete_rate_based_rule - [ ] delete_regex_match_set - [ ] delete_regex_pattern_set - [ ] delete_rule +- [ ] delete_rule_group - [ ] delete_size_constraint_set - [ ] delete_sql_injection_match_set - [ ] delete_web_acl @@ -3550,17 +4300,20 @@ - [ ] get_change_token_status - [ ] get_geo_match_set - [ ] get_ip_set +- [ ] get_permission_policy - [ ] get_rate_based_rule - [ ] get_rate_based_rule_managed_keys - [ ] get_regex_match_set - [ ] get_regex_pattern_set - [ ] get_rule +- [ ] get_rule_group - [ ] get_sampled_requests - [ ] get_size_constraint_set - [ ] get_sql_injection_match_set - [ ] get_web_acl - [ ] get_web_acl_for_resource - [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group - [ ] list_byte_match_sets - [ ] list_geo_match_sets - [ ] list_ip_sets @@ -3568,11 +4321,14 @@ - [ ] list_regex_match_sets - [ ] list_regex_pattern_sets - [ ] list_resources_for_web_acl +- [ ] list_rule_groups - [ ] list_rules - [ ] list_size_constraint_sets - [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets +- [ ] put_permission_policy - [ ] update_byte_match_set - [ ] update_geo_match_set - [ ] update_ip_set @@ -3580,6 +4336,7 @@ - [ ] update_regex_match_set - [ ] update_regex_pattern_set - [ ] update_rule +- [ ] update_rule_group - [ ] update_size_constraint_set - [ ] update_sql_injection_match_set - [ ] update_web_acl @@ -3608,6 +4365,7 @@ - [ ] describe_comments - [ ] describe_document_versions - [ ] describe_folder_contents +- [ ] describe_groups - [ ] describe_notification_subscriptions - [ ] describe_resource_permissions - [ ] describe_root_folders @@ -3626,6 +4384,39 @@ - [ ] update_folder - [ ] update_user +## workmail - 0% implemented +- [ ] associate_delegate_to_resource +- [ ] associate_member_to_group +- [ ] create_alias +- [ ] create_group +- [ ] create_resource +- [ ] create_user +- [ ] delete_alias +- [ ] delete_group +- [ ] delete_mailbox_permissions +- [ ] delete_resource +- [ ] delete_user +- [ ] deregister_from_work_mail +- [ ] describe_group +- [ ] describe_organization +- [ ] describe_resource +- [ ] describe_user +- [ ] disassociate_delegate_from_resource +- [ ] disassociate_member_from_group +- [ ] list_aliases +- [ ] list_group_members +- [ ] list_groups +- [ ] list_mailbox_permissions +- [ ] list_organizations +- [ ] list_resource_delegates 
+- [ ] list_resources +- [ ] list_users +- [ ] put_mailbox_permissions +- [ ] register_to_work_mail +- [ ] reset_password +- [ ] update_primary_email_address +- [ ] update_resource + ## workspaces - 0% implemented - [ ] create_tags - [ ] create_workspaces diff --git a/MANIFEST.in b/MANIFEST.in index 43e8120e4..bd7eb968a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -2,5 +2,6 @@ include README.md LICENSE AUTHORS.md include requirements.txt requirements-dev.txt tox.ini include moto/ec2/resources/instance_types.json include moto/ec2/resources/amis.json +include moto/cognitoidp/resources/*.json recursive-include moto/templates * -recursive-include tests * +recursive-include tests * diff --git a/Makefile b/Makefile index 99b7f2620..2a7249760 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ endif init: @python setup.py develop - @pip install -r requirements.txt + @pip install -r requirements-dev.txt lint: flake8 moto @@ -19,6 +19,7 @@ test: lint rm -f .coverage rm -rf cover @nosetests -sv --with-coverage --cover-html ./tests/ $(TEST_EXCLUDE) + test_server: @TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/ @@ -36,14 +37,13 @@ tag_github_release: git tag `python setup.py --version` git push origin `python setup.py --version` -publish: implementation_coverage \ - upload_pypi_artifact \ +publish: upload_pypi_artifact \ tag_github_release \ push_dockerhub_image implementation_coverage: - ./scripts/implementation_coverage.py > IMPLEMENTATION_COVERAGE.md - git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage" + ./scripts/implementation_coverage.py + git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage" || true scaffold: @pip install -r requirements-dev.txt > /dev/null diff --git a/README.md b/README.md index 59dc67432..0a9b843f0 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,12 @@ [![Join the chat at https://gitter.im/awsmoto/Lobby](https://badges.gitter.im/awsmoto/Lobby.svg)](https://gitter.im/awsmoto/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -[![Build Status](https://travis-ci.org/spulec/moto.png?branch=master)](https://travis-ci.org/spulec/moto) -[![Coverage Status](https://coveralls.io/repos/spulec/moto/badge.png?branch=master)](https://coveralls.io/r/spulec/moto) +[![Build Status](https://travis-ci.org/spulec/moto.svg?branch=master)](https://travis-ci.org/spulec/moto) +[![Coverage Status](https://coveralls.io/repos/spulec/moto/badge.svg?branch=master)](https://coveralls.io/r/spulec/moto) [![Docs](https://readthedocs.org/projects/pip/badge/?version=stable)](http://docs.getmoto.org) +![PyPI](https://img.shields.io/pypi/v/moto.svg) +![PyPI - Python Version](https://img.shields.io/pypi/pyversions/moto.svg) +![PyPI - Downloads](https://img.shields.io/pypi/dw/moto.svg) # In a nutshell @@ -47,7 +50,7 @@ def test_my_model_save(): body = conn.Object('mybucket', 'steve').get()['Body'].read().decode("utf-8") - assert body == b'is awesome' + assert body == 'is awesome' ``` With the decorator wrapping the test, all the calls to s3 are automatically mocked out. The mock keeps the state of the buckets and keys. @@ -55,87 +58,99 @@ With the decorator wrapping the test, all the calls to s3 are automatically mock It gets even better! Moto isn't just for Python code and it isn't just for S3. Look at the [standalone server mode](https://github.com/spulec/moto#stand-alone-server-mode) for more information about running Moto with other languages. 
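Here's a minimal sketch of what that looks like from boto3, assuming a standalone server is already running locally via `moto_server s3 -p 5000` (the endpoint URL, port, and dummy credentials below are placeholders for this sketch, not part of the patch itself):

```python
import boto3

# Point the client at a locally running moto_server instead of real AWS.
# Any SDK that lets you override the endpoint URL can do the same, which is
# how non-Python languages use moto. The host/port and the dummy credentials
# are assumptions for this sketch.
s3 = boto3.client(
    "s3",
    region_name="us-east-1",
    endpoint_url="http://127.0.0.1:5000",
    aws_access_key_id="fake",
    aws_secret_access_key="fake",
)
s3.create_bucket(Bucket="mybucket")
print(s3.list_buckets()["Buckets"][0]["Name"])  # prints: mybucket
```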
Here's the status of the other AWS services implemented: ```gherkin -|------------------------------------------------------------------------------| -| Service Name | Decorator | Development Status | -|------------------------------------------------------------------------------| -| ACM | @mock_acm | all endpoints done | -|------------------------------------------------------------------------------| -| API Gateway | @mock_apigateway | core endpoints done | -|------------------------------------------------------------------------------| -| Autoscaling | @mock_autoscaling| core endpoints done | -|------------------------------------------------------------------------------| -| Cloudformation | @mock_cloudformation| core endpoints done | -|------------------------------------------------------------------------------| -| Cloudwatch | @mock_cloudwatch | basic endpoints done | -|------------------------------------------------------------------------------| -| CloudwatchEvents | @mock_events | all endpoints done | -|------------------------------------------------------------------------------| -| Data Pipeline | @mock_datapipeline| basic endpoints done | -|------------------------------------------------------------------------------| -| DynamoDB | @mock_dynamodb | core endpoints done | -| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | -|------------------------------------------------------------------------------| -| EC2 | @mock_ec2 | core endpoints done | -| - AMI | | core endpoints done | -| - EBS | | core endpoints done | -| - Instances | | all endpoints done | -| - Security Groups | | core endpoints done | -| - Tags | | all endpoints done | -|------------------------------------------------------------------------------| -| ECR | @mock_ecr | basic endpoints done | -|------------------------------------------------------------------------------| -| ECS | @mock_ecs | basic endpoints done | -|------------------------------------------------------------------------------| -| ELB | @mock_elb | core endpoints done | -|------------------------------------------------------------------------------| -| ELBv2 | @mock_elbv2 | all endpoints done | -|------------------------------------------------------------------------------| -| EMR | @mock_emr | core endpoints done | -|------------------------------------------------------------------------------| -| Glacier | @mock_glacier | core endpoints done | -|------------------------------------------------------------------------------| -| IAM | @mock_iam | core endpoints done | -|------------------------------------------------------------------------------| -| IoT | @mock_iot | core endpoints done | -| | @mock_iotdata | core endpoints done | -|------------------------------------------------------------------------------| -| Lambda | @mock_lambda | basic endpoints done, requires | -| | | docker | -|------------------------------------------------------------------------------| -| Logs | @mock_logs | basic endpoints done | -|------------------------------------------------------------------------------| -| Kinesis | @mock_kinesis | core endpoints done | -|------------------------------------------------------------------------------| -| KMS | @mock_kms | basic endpoints done | -|------------------------------------------------------------------------------| -| Polly | @mock_polly | all endpoints done | -|------------------------------------------------------------------------------| -| RDS | @mock_rds | core endpoints done | 
-|------------------------------------------------------------------------------| -| RDS2 | @mock_rds2 | core endpoints done | -|------------------------------------------------------------------------------| -| Redshift | @mock_redshift | core endpoints done | -|------------------------------------------------------------------------------| -| Route53 | @mock_route53 | core endpoints done | -|------------------------------------------------------------------------------| -| S3 | @mock_s3 | core endpoints done | -|------------------------------------------------------------------------------| -| SES | @mock_ses | all endpoints done | -|------------------------------------------------------------------------------| -| SNS | @mock_sns | all endpoints done | -|------------------------------------------------------------------------------| -| SQS | @mock_sqs | core endpoints done | -|------------------------------------------------------------------------------| -| SSM | @mock_ssm | core endpoints done | -|------------------------------------------------------------------------------| -| STS | @mock_sts | core endpoints done | -|------------------------------------------------------------------------------| -| SWF | @mock_swf | basic endpoints done | -|------------------------------------------------------------------------------| -| X-Ray | @mock_xray | all endpoints done | -|------------------------------------------------------------------------------| +|-------------------------------------------------------------------------------------| +| Service Name | Decorator | Development Status | +|-------------------------------------------------------------------------------------| +| ACM | @mock_acm | all endpoints done | +|-------------------------------------------------------------------------------------| +| API Gateway | @mock_apigateway | core endpoints done | +|-------------------------------------------------------------------------------------| +| Autoscaling | @mock_autoscaling | core endpoints done | +|-------------------------------------------------------------------------------------| +| Cloudformation | @mock_cloudformation | core endpoints done | +|-------------------------------------------------------------------------------------| +| Cloudwatch | @mock_cloudwatch | basic endpoints done | +|-------------------------------------------------------------------------------------| +| CloudwatchEvents | @mock_events | all endpoints done | +|-------------------------------------------------------------------------------------| +| Cognito Identity | @mock_cognitoidentity | basic endpoints done | +|-------------------------------------------------------------------------------------| +| Cognito Identity Provider | @mock_cognitoidp | basic endpoints done | +|-------------------------------------------------------------------------------------| +| Config | @mock_config | basic endpoints done | +|-------------------------------------------------------------------------------------| +| Data Pipeline | @mock_datapipeline | basic endpoints done | +|-------------------------------------------------------------------------------------| +| DynamoDB | @mock_dynamodb | core endpoints done | +| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | +|-------------------------------------------------------------------------------------| +| EC2 | @mock_ec2 | core endpoints done | +| - AMI | | core endpoints done | +| - EBS | | core endpoints done | +| - Instances | | all endpoints done | +| 
- Security Groups | | core endpoints done | +| - Tags | | all endpoints done | +|-------------------------------------------------------------------------------------| +| ECR | @mock_ecr | basic endpoints done | +|-------------------------------------------------------------------------------------| +| ECS | @mock_ecs | basic endpoints done | +|-------------------------------------------------------------------------------------| +| ELB | @mock_elb | core endpoints done | +|-------------------------------------------------------------------------------------| +| ELBv2 | @mock_elbv2 | all endpoints done | +|-------------------------------------------------------------------------------------| +| EMR | @mock_emr | core endpoints done | +|-------------------------------------------------------------------------------------| +| Glacier | @mock_glacier | core endpoints done | +|-------------------------------------------------------------------------------------| +| IAM | @mock_iam | core endpoints done | +|-------------------------------------------------------------------------------------| +| IoT | @mock_iot | core endpoints done | +| | @mock_iotdata | core endpoints done | +|-------------------------------------------------------------------------------------| +| Kinesis | @mock_kinesis | core endpoints done | +|-------------------------------------------------------------------------------------| +| KMS | @mock_kms | basic endpoints done | +|-------------------------------------------------------------------------------------| +| Lambda | @mock_lambda | basic endpoints done, requires | +| | | docker | +|-------------------------------------------------------------------------------------| +| Logs | @mock_logs | basic endpoints done | +|-------------------------------------------------------------------------------------| +| Organizations | @mock_organizations | some core endpoints done | +|-------------------------------------------------------------------------------------| +| Polly | @mock_polly | all endpoints done | +|-------------------------------------------------------------------------------------| +| RDS | @mock_rds | core endpoints done | +|-------------------------------------------------------------------------------------| +| RDS2 | @mock_rds2 | core endpoints done | +|-------------------------------------------------------------------------------------| +| Redshift | @mock_redshift | core endpoints done | +|-------------------------------------------------------------------------------------| +| Route53 | @mock_route53 | core endpoints done | +|-------------------------------------------------------------------------------------| +| S3 | @mock_s3 | core endpoints done | +|-------------------------------------------------------------------------------------| +| SecretsManager | @mock_secretsmanager | basic endpoints done | +|-------------------------------------------------------------------------------------| +| SES | @mock_ses | all endpoints done | +|-------------------------------------------------------------------------------------| +| SNS | @mock_sns | all endpoints done | +|-------------------------------------------------------------------------------------| +| SQS | @mock_sqs | core endpoints done | +|-------------------------------------------------------------------------------------| +| SSM | @mock_ssm | core endpoints done | +|-------------------------------------------------------------------------------------| +| STS | @mock_sts | core endpoints done | 
+|-------------------------------------------------------------------------------------|
+| SWF                       | @mock_swf             | basic endpoints done            |
+|-------------------------------------------------------------------------------------|
+| X-Ray                     | @mock_xray            | all endpoints done              |
+|-------------------------------------------------------------------------------------|
 ```
 
+For a full list of endpoints and their implementation status, see the [implementation coverage](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) document.
+
 ### Another Example
 
 Imagine you have a function that you use to launch new ec2 instances:
@@ -167,7 +182,7 @@ def test_add_servers():
 ```
 
 #### Using moto 1.0.X with boto2
-moto 1.0.X mock docorators are defined for boto3 and do not work with boto2. Use the @mock_AWSSVC_deprecated to work with boto2.
+moto 1.0.X mock decorators are defined for boto3 and do not work with boto2. Use the @mock_AWSSVC_deprecated decorators to work with boto2.
 
 Using moto with boto2
 ```python
@@ -249,7 +264,7 @@ It uses flask, which isn't a default dependency. You can install the server
 'extra' package with:
 
 ```python
-pip install moto[server]
+pip install "moto[server]"
 ```
 
 You can then start it running a service:
@@ -306,3 +321,11 @@ boto3.resource(
 ```console
 $ pip install moto
 ```
+
+## Releases
+
+Releases are done from Travis CI, fairly closely following this guide:
+https://docs.travis-ci.com/user/deployment/pypi/
+
+- Commits to the `master` branch do a dev deploy to PyPI.
+- Commits to a tag do a real deploy to PyPI.
diff --git a/docs/docs/getting_started.rst b/docs/docs/getting_started.rst
index 97f667d26..d52e76235 100644
--- a/docs/docs/getting_started.rst
+++ b/docs/docs/getting_started.rst
@@ -20,7 +20,7 @@ If you want to install ``moto`` from source::
 Moto usage
 ----------
 
-For example we have the following code we want to test:
+For example, we have the following code we want to test:
 
 .. sourcecode:: python
 
@@ -39,12 +39,12 @@ For example we have the following code we want to test:
     k.key = self.name
     k.set_contents_from_string(self.value)
 
-There are several method to do this, just keep in mind Moto creates a full blank environment.
+There are several ways to do this, but you should keep in mind that Moto creates a full, blank environment.
 
 Decorator
 ~~~~~~~~~
 
-With a decorator wrapping all the calls to S3 are automatically mocked out.
+With a decorator wrapping your test, all the calls to S3 are automatically mocked out.
 
 .. sourcecode:: python
 
@@ -66,7 +66,7 @@ With a decorator wrapping all the calls to S3 are automatically mocked out.
 Context manager
 ~~~~~~~~~~~~~~~
 
-Same as decorator, every call inside ``with`` statement are mocked out.
+As with the decorator, every call inside the ``with`` statement is mocked out.
 
 .. sourcecode:: python
 
@@ -83,7 +83,7 @@ Same as decorator, every call inside ``with`` statement are mocked out.
 Raw
 ~~~
 
-You can also start and stop manually the mocking.
+You can also start and stop the mocking manually.
 
 .. sourcecode:: python
 
@@ -104,11 +104,11 @@ You can also start and stop manually the mocking.
 Stand-alone server mode
 ~~~~~~~~~~~~~~~~~~~~~~~
 
-Moto comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. It is very useful to test even if you don't use Python.
+Moto also comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. For testing purposes, it's extremely useful even if you don't use Python.
 
 .. sourcecode:: bash
 
     $ moto_server ec2 -p3000
      * Running on http://127.0.0.1:3000/
 
-This method isn't encouraged if you're using ``boto``, best is to use decorator method. 
+However, this method isn't encouraged if you're using ``boto``; the best solution is to use the decorator method.
diff --git a/docs/index.rst b/docs/index.rst
index 321342401..4811fb797 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -17,66 +17,95 @@ with ``moto`` and its usage.
 Currently implemented Services:
 -------------------------------
 
-+-----------------------+---------------------+-----------------------------------+
-| Service Name          | Decorator           | Development Status                |
-+=======================+=====================+===================================+
-| API Gateway           | @mock_apigateway    | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| Autoscaling           | @mock_autoscaling   | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| Cloudformation        | @mock_cloudformation| core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| Cloudwatch            | @mock_cloudwatch    | basic endpoints done              |
-+-----------------------+---------------------+-----------------------------------+
-| Data Pipeline         | @mock_datapipeline  | basic endpoints done              |
-+-----------------------+---------------------+-----------------------------------+
-| - DynamoDB            | - @mock_dynamodb    | - core endpoints done             |
-| - DynamoDB2           | - @mock_dynamodb2   | - core endpoints + partial indexes|
-+-----------------------+---------------------+-----------------------------------+
-| EC2                   | @mock_ec2           | core endpoints done               |
-| - AMI                 |                     | core endpoints done               |
-| - EBS                 |                     | core endpoints done               |
-| - Instances           |                     | all endpoints done                |
-| - Security Groups     |                     | core endpoints done               |
-| - Tags                |                     | all endpoints done                |
-+-----------------------+---------------------+-----------------------------------+
-| ECS                   | @mock_ecs           | basic endpoints done              |
-+-----------------------+---------------------+-----------------------------------+
-| ELB                   | @mock_elb           | core endpoints done               |
-|                       | @mock_elbv2         | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| EMR                   | @mock_emr           | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| Glacier               | @mock_glacier       | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| IAM                   | @mock_iam           | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| Lambda                | @mock_lambda        | basic endpoints done              |
-+-----------------------+---------------------+-----------------------------------+
-| Kinesis               | @mock_kinesis       | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| KMS                   | @mock_kms           | basic endpoints done              |
-+-----------------------+---------------------+-----------------------------------+
-| RDS                   | @mock_rds           | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| RDS2                  | @mock_rds2          | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| Redshift              | @mock_redshift      | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| Route53               | @mock_route53       | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+
-| S3                    | @mock_s3            | core endpoints done               |
-+-----------------------+---------------------+-----------------------------------+ -| SES | @mock_ses | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| SNS | @mock_sns | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| SQS | @mock_sqs | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| STS | @mock_sts | core endpoints done | -+-----------------------+---------------------+-----------------------------------+ -| SWF | @mock_swf | basic endpoints done | -+-----------------------+---------------------+-----------------------------------+ ++---------------------------+-----------------------+------------------------------------+ +| Service Name | Decorator | Development Status | ++===========================+=======================+====================================+ +| ACM | @mock_acm | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| API Gateway | @mock_apigateway | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Autoscaling | @mock_autoscaling | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Cloudformation | @mock_cloudformation | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Cloudwatch | @mock_cloudwatch | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| CloudwatchEvents | @mock_events | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Cognito Identity | @mock_cognitoidentity | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Cognito Identity Provider | @mock_cognitoidp | all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Config | @mock_config | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| Data Pipeline | @mock_datapipeline | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| DynamoDB | - @mock_dynamodb | - core endpoints done | +| DynamoDB2 | - @mock_dynamodb2 | - core endpoints + partial indexes | ++---------------------------+-----------------------+------------------------------------+ +| EC2 | @mock_ec2 | core endpoints done | +| - AMI | | - core endpoints done | +| - EBS | | - core endpoints done | +| - Instances | | - all endpoints done | +| - Security Groups | | - core endpoints done | +| - Tags | | - all endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| ECR | @mock_ecr | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| ECS | @mock_ecs | basic endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| ELB | @mock_elb | core endpoints done | ++---------------------------+-----------------------+------------------------------------+ +| ELBv2 | @mock_elbv2 | all endpoints done | 
++---------------------------+-----------------------+------------------------------------+
+| EMR                       | @mock_emr             | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| Glacier                   | @mock_glacier         | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| IAM                       | @mock_iam             | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| IoT                       | @mock_iot             | core endpoints done                |
+|                           | @mock_iotdata         | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| Kinesis                   | @mock_kinesis         | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| KMS                       | @mock_kms             | basic endpoints done               |
++---------------------------+-----------------------+------------------------------------+
+| Lambda                    | @mock_lambda          | basic endpoints done,              |
+|                           |                       | requires docker                    |
++---------------------------+-----------------------+------------------------------------+
+| Logs                      | @mock_logs            | basic endpoints done               |
++---------------------------+-----------------------+------------------------------------+
+| Organizations             | @mock_organizations   | some core endpoints done           |
++---------------------------+-----------------------+------------------------------------+
+| Polly                     | @mock_polly           | all endpoints done                 |
++---------------------------+-----------------------+------------------------------------+
+| RDS                       | @mock_rds             | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| RDS2                      | @mock_rds2            | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| Redshift                  | @mock_redshift        | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| Route53                   | @mock_route53         | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| S3                        | @mock_s3              | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| SecretsManager            | @mock_secretsmanager  | basic endpoints done               |
++---------------------------+-----------------------+------------------------------------+
+| SES                       | @mock_ses             | all endpoints done                 |
++---------------------------+-----------------------+------------------------------------+
+| SNS                       | @mock_sns             | all endpoints done                 |
++---------------------------+-----------------------+------------------------------------+
+| SQS                       | @mock_sqs             | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| SSM                       | @mock_ssm             | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| STS                       | @mock_sts             | core endpoints done                |
++---------------------------+-----------------------+------------------------------------+
+| SWF                       | @mock_swf             | basic endpoints done               |
++---------------------------+-----------------------+------------------------------------+
+| X-Ray                     | @mock_xray            | all endpoints done                 |
++---------------------------+-----------------------+------------------------------------+
diff --git a/moto/__init__.py b/moto/__init__.py
index 8a4b30979..8594cedd2 100644
--- a/moto/__init__.py
+++ b/moto/__init__.py
@@ -3,7 +3,7 @@ import logging
 # 
logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.0.1' +__version__ = '1.3.14.dev' from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa @@ -11,9 +11,13 @@ from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8 from .awslambda import mock_lambda, mock_lambda_deprecated # flake8: noqa from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated # flake8: noqa from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa +from .cognitoidentity import mock_cognitoidentity, mock_cognitoidentity_deprecated # flake8: noqa +from .cognitoidp import mock_cognitoidp, mock_cognitoidp_deprecated # flake8: noqa +from .config import mock_config # flake8: noqa from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa +from .dynamodbstreams import mock_dynamodbstreams # flake8: noqa from .ec2 import mock_ec2, mock_ec2_deprecated # flake8: noqa from .ecr import mock_ecr, mock_ecr_deprecated # flake8: noqa from .ecs import mock_ecs, mock_ecs_deprecated # flake8: noqa @@ -22,16 +26,20 @@ from .elbv2 import mock_elbv2 # flake8: noqa from .emr import mock_emr, mock_emr_deprecated # flake8: noqa from .events import mock_events # flake8: noqa from .glacier import mock_glacier, mock_glacier_deprecated # flake8: noqa +from .glue import mock_glue # flake8: noqa from .iam import mock_iam, mock_iam_deprecated # flake8: noqa from .kinesis import mock_kinesis, mock_kinesis_deprecated # flake8: noqa from .kms import mock_kms, mock_kms_deprecated # flake8: noqa +from .organizations import mock_organizations # flake8: noqa from .opsworks import mock_opsworks, mock_opsworks_deprecated # flake8: noqa from .polly import mock_polly # flake8: noqa from .rds import mock_rds, mock_rds_deprecated # flake8: noqa from .rds2 import mock_rds2, mock_rds2_deprecated # flake8: noqa from .redshift import mock_redshift, mock_redshift_deprecated # flake8: noqa +from .resourcegroups import mock_resourcegroups # flake8: noqa from .s3 import mock_s3, mock_s3_deprecated # flake8: noqa from .ses import mock_ses, mock_ses_deprecated # flake8: noqa +from .secretsmanager import mock_secretsmanager # flake8: noqa from .sns import mock_sns, mock_sns_deprecated # flake8: noqa from .sqs import mock_sqs, mock_sqs_deprecated # flake8: noqa from .sts import mock_sts, mock_sts_deprecated # flake8: noqa @@ -41,6 +49,7 @@ from .swf import mock_swf, mock_swf_deprecated # flake8: noqa from .xray import mock_xray, mock_xray_client, XRaySegment # flake8: noqa from .logs import mock_logs, mock_logs_deprecated # flake8: noqa from .batch import mock_batch # flake8: noqa +from .resourcegroupstaggingapi import mock_resourcegroupstaggingapi # flake8: noqa from .iot import mock_iot # flake8: noqa from .iotdata import mock_iotdata # flake8: noqa diff --git a/moto/acm/models.py b/moto/acm/models.py index 39be8945d..15a1bd44d 100644 --- a/moto/acm/models.py +++ b/moto/acm/models.py @@ -243,7 +243,7 @@ class CertBundle(BaseModel): 'KeyAlgorithm': key_algo, 'NotAfter': datetime_to_epoch(self._cert.not_valid_after), 'NotBefore': datetime_to_epoch(self._cert.not_valid_before), - 'Serial': self._cert.serial, + 'Serial': self._cert.serial_number, 'SignatureAlgorithm': 
self._cert.signature_algorithm_oid._name.upper().replace('ENCRYPTION', ''), 'Status': self.status, # One of PENDING_VALIDATION, ISSUED, INACTIVE, EXPIRED, VALIDATION_TIMED_OUT, REVOKED, FAILED. 'Subject': 'CN={0}'.format(self.common_name), diff --git a/moto/acm/responses.py b/moto/acm/responses.py index 431a8cf60..38ebbaaa0 100644 --- a/moto/acm/responses.py +++ b/moto/acm/responses.py @@ -111,16 +111,16 @@ class AWSCertificateManagerResponse(BaseResponse): # actual data try: certificate = base64.standard_b64decode(certificate) - except: + except Exception: return AWSValidationException('The certificate is not PEM-encoded or is not valid.').response() try: private_key = base64.standard_b64decode(private_key) - except: + except Exception: return AWSValidationException('The private key is not PEM-encoded or is not valid.').response() if chain is not None: try: chain = base64.standard_b64decode(chain) - except: + except Exception: return AWSValidationException('The certificate chain is not PEM-encoded or is not valid.').response() try: diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py index d4cf8d1c7..62fa24392 100644 --- a/moto/apigateway/exceptions.py +++ b/moto/apigateway/exceptions.py @@ -8,3 +8,11 @@ class StageNotFoundException(RESTError): def __init__(self): super(StageNotFoundException, self).__init__( "NotFoundException", "Invalid stage identifier specified") + + +class ApiKeyNotFoundException(RESTError): + code = 404 + + def __init__(self): + super(ApiKeyNotFoundException, self).__init__( + "NotFoundException", "Invalid API Key identifier specified") diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index e7ff98119..41a49e361 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -1,14 +1,17 @@ from __future__ import absolute_import from __future__ import unicode_literals -import datetime +import random +import string import requests +import time -from moto.packages.responses import responses +from boto3.session import Session +import responses from moto.core import BaseBackend, BaseModel -from moto.core.utils import iso_8601_datetime_with_milliseconds from .utils import create_id -from .exceptions import StageNotFoundException +from moto.core.utils import path_url +from .exceptions import StageNotFoundException, ApiKeyNotFoundException STAGE_URL = "https://{api_id}.execute-api.{region_name}.amazonaws.com/{stage_name}" @@ -20,8 +23,7 @@ class Deployment(BaseModel, dict): self['id'] = deployment_id self['stageName'] = name self['description'] = description - self['createdDate'] = iso_8601_datetime_with_milliseconds( - datetime.datetime.now()) + self['createdDate'] = int(time.time()) class IntegrationResponse(BaseModel, dict): @@ -293,6 +295,44 @@ class Stage(BaseModel, dict): raise Exception('Patch operation "%s" not implemented' % op['op']) +class ApiKey(BaseModel, dict): + + def __init__(self, name=None, description=None, enabled=True, + generateDistinctId=False, value=None, stageKeys=None, customerId=None): + super(ApiKey, self).__init__() + self['id'] = create_id() + self['value'] = value if value else ''.join(random.sample(string.ascii_letters + string.digits, 40)) + self['name'] = name + self['customerId'] = customerId + self['description'] = description + self['enabled'] = enabled + self['createdDate'] = self['lastUpdatedDate'] = int(time.time()) + self['stageKeys'] = stageKeys + + +class UsagePlan(BaseModel, dict): + + def __init__(self, name=None, description=None, apiStages=[], + throttle=None, quota=None): + 
super(UsagePlan, self).__init__() + self['id'] = create_id() + self['name'] = name + self['description'] = description + self['apiStages'] = apiStages + self['throttle'] = throttle + self['quota'] = quota + + +class UsagePlanKey(BaseModel, dict): + + def __init__(self, id, type, name, value): + super(UsagePlanKey, self).__init__() + self['id'] = id + self['name'] = name + self['type'] = type + self['value'] = value + + class RestAPI(BaseModel): def __init__(self, id, region_name, name, description): @@ -300,7 +340,7 @@ class RestAPI(BaseModel): self.region_name = region_name self.name = name self.description = description - self.create_date = datetime.datetime.utcnow() + self.create_date = int(time.time()) self.deployments = {} self.stages = {} @@ -308,12 +348,15 @@ class RestAPI(BaseModel): self.resources = {} self.add_child('/') # Add default child + def __repr__(self): + return str(self.id) + def to_dict(self): return { "id": self.id, "name": self.name, "description": self.description, - "createdDate": iso_8601_datetime_with_milliseconds(self.create_date), + "createdDate": int(time.time()), } def add_child(self, path, parent_id=None): @@ -330,7 +373,8 @@ class RestAPI(BaseModel): # TODO deal with no matching resource def resource_callback(self, request): - path_after_stage_name = '/'.join(request.path_url.split("/")[2:]) + path = path_url(request.url) + path_after_stage_name = '/'.join(path.split("/")[2:]) if not path_after_stage_name: path_after_stage_name = '/' @@ -388,6 +432,9 @@ class APIGatewayBackend(BaseBackend): def __init__(self, region_name): super(APIGatewayBackend, self).__init__() self.apis = {} + self.keys = {} + self.usage_plans = {} + self.usage_plan_keys = {} self.region_name = region_name def reset(self): @@ -541,8 +588,71 @@ class APIGatewayBackend(BaseBackend): api = self.get_rest_api(function_id) return api.delete_deployment(deployment_id) + def create_apikey(self, payload): + key = ApiKey(**payload) + self.keys[key['id']] = key + return key + + def get_apikeys(self): + return list(self.keys.values()) + + def get_apikey(self, api_key_id): + return self.keys[api_key_id] + + def delete_apikey(self, api_key_id): + self.keys.pop(api_key_id) + return {} + + def create_usage_plan(self, payload): + plan = UsagePlan(**payload) + self.usage_plans[plan['id']] = plan + return plan + + def get_usage_plans(self, api_key_id=None): + plans = list(self.usage_plans.values()) + if api_key_id is not None: + plans = [ + plan + for plan in plans + if self.usage_plan_keys.get(plan['id'], {}).get(api_key_id, False) + ] + return plans + + def get_usage_plan(self, usage_plan_id): + return self.usage_plans[usage_plan_id] + + def delete_usage_plan(self, usage_plan_id): + self.usage_plans.pop(usage_plan_id) + return {} + + def create_usage_plan_key(self, usage_plan_id, payload): + if usage_plan_id not in self.usage_plan_keys: + self.usage_plan_keys[usage_plan_id] = {} + + key_id = payload["keyId"] + if key_id not in self.keys: + raise ApiKeyNotFoundException() + + api_key = self.keys[key_id] + + usage_plan_key = UsagePlanKey(id=key_id, type=payload["keyType"], name=api_key["name"], value=api_key["value"]) + self.usage_plan_keys[usage_plan_id][usage_plan_key['id']] = usage_plan_key + return usage_plan_key + + def get_usage_plan_keys(self, usage_plan_id): + if usage_plan_id not in self.usage_plan_keys: + return [] + + return list(self.usage_plan_keys[usage_plan_id].values()) + + def get_usage_plan_key(self, usage_plan_id, key_id): + return self.usage_plan_keys[usage_plan_id][key_id] + + def 
delete_usage_plan_key(self, usage_plan_id, key_id): + self.usage_plan_keys[usage_plan_id].pop(key_id) + return {} + apigateway_backends = {} -# Not available in boto yet -for region_name in ['us-east-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1']: +for region_name in Session().get_available_regions('apigateway'): apigateway_backends[region_name] = APIGatewayBackend(region_name) diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 443fd4060..bc4d262cd 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -4,7 +4,7 @@ import json from moto.core.responses import BaseResponse from .models import apigateway_backends -from .exceptions import StageNotFoundException +from .exceptions import StageNotFoundException, ApiKeyNotFoundException class APIGatewayResponse(BaseResponse): @@ -226,3 +226,79 @@ class APIGatewayResponse(BaseResponse): deployment = self.backend.delete_deployment( function_id, deployment_id) return 200, {}, json.dumps(deployment) + + def apikeys(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + if self.method == 'POST': + apikey_response = self.backend.create_apikey(json.loads(self.body)) + elif self.method == 'GET': + apikeys_response = self.backend.get_apikeys() + return 200, {}, json.dumps({"item": apikeys_response}) + return 200, {}, json.dumps(apikey_response) + + def apikey_individual(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + url_path_parts = self.path.split("/") + apikey = url_path_parts[2] + + if self.method == 'GET': + apikey_response = self.backend.get_apikey(apikey) + elif self.method == 'DELETE': + apikey_response = self.backend.delete_apikey(apikey) + return 200, {}, json.dumps(apikey_response) + + def usage_plans(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + if self.method == 'POST': + usage_plan_response = self.backend.create_usage_plan(json.loads(self.body)) + elif self.method == 'GET': + api_key_id = self.querystring.get("keyId", [None])[0] + usage_plans_response = self.backend.get_usage_plans(api_key_id=api_key_id) + return 200, {}, json.dumps({"item": usage_plans_response}) + return 200, {}, json.dumps(usage_plan_response) + + def usage_plan_individual(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + url_path_parts = self.path.split("/") + usage_plan = url_path_parts[2] + + if self.method == 'GET': + usage_plan_response = self.backend.get_usage_plan(usage_plan) + elif self.method == 'DELETE': + usage_plan_response = self.backend.delete_usage_plan(usage_plan) + return 200, {}, json.dumps(usage_plan_response) + + def usage_plan_keys(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + url_path_parts = self.path.split("/") + usage_plan_id = url_path_parts[2] + + if self.method == 'POST': + try: + usage_plan_response = self.backend.create_usage_plan_key(usage_plan_id, json.loads(self.body)) + except ApiKeyNotFoundException as error: + return error.code, {}, '{{"message":"{0}","code":"{1}"}}'.format(error.message, error.error_type) + + elif self.method == 'GET': + usage_plans_response = self.backend.get_usage_plan_keys(usage_plan_id) + return 200, {}, json.dumps({"item": usage_plans_response}) + + return 200, {}, json.dumps(usage_plan_response) + + def usage_plan_key_individual(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + url_path_parts = self.path.split("/") + usage_plan_id = 
url_path_parts[2] + key_id = url_path_parts[4] + + if self.method == 'GET': + usage_plan_response = self.backend.get_usage_plan_key(usage_plan_id, key_id) + elif self.method == 'DELETE': + usage_plan_response = self.backend.delete_usage_plan_key(usage_plan_id, key_id) + return 200, {}, json.dumps(usage_plan_response) diff --git a/moto/apigateway/urls.py b/moto/apigateway/urls.py index 5637699e0..5c6d372fa 100644 --- a/moto/apigateway/urls.py +++ b/moto/apigateway/urls.py @@ -18,4 +18,10 @@ url_paths = { '{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/responses/(?P<status_code>\d+)$': APIGatewayResponse().resource_method_responses, '{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/integration/?$': APIGatewayResponse().integrations, '{0}/restapis/(?P<function_id>[^/]+)/resources/(?P<resource_id>[^/]+)/methods/(?P<method_name>[^/]+)/integration/responses/(?P<status_code>\d+)/?$': APIGatewayResponse().integration_responses, + '{0}/apikeys$': APIGatewayResponse().apikeys, + '{0}/apikeys/(?P<apikey>[^/]+)': APIGatewayResponse().apikey_individual, + '{0}/usageplans$': APIGatewayResponse().usage_plans, + '{0}/usageplans/(?P<usage_plan_id>[^/]+)/?$': APIGatewayResponse().usage_plan_individual, + '{0}/usageplans/(?P<usage_plan_id>[^/]+)/keys$': APIGatewayResponse().usage_plan_keys, + '{0}/usageplans/(?P<usage_plan_id>[^/]+)/keys/(?P<api_key_id>[^/]+)/?$': APIGatewayResponse().usage_plan_key_individual, } diff --git a/moto/apigateway/utils.py b/moto/apigateway/utils.py index 6d1e6ef19..31f8060b0 100644 --- a/moto/apigateway/utils.py +++ b/moto/apigateway/utils.py @@ -1,9 +1,10 @@ from __future__ import unicode_literals import six import random +import string def create_id(): size = 10 - chars = list(range(10)) + ['A-Z'] + chars = list(range(10)) + list(string.ascii_lowercase) return ''.join(six.text_type(random.choice(chars)) for x in range(size)) diff --git a/moto/autoscaling/exceptions.py b/moto/autoscaling/exceptions.py index 15b2e4f4a..74f62241d 100644 --- a/moto/autoscaling/exceptions.py +++ b/moto/autoscaling/exceptions.py @@ -3,12 +3,22 @@ from moto.core.exceptions import RESTError class AutoscalingClientError(RESTError): + code = 400 + + +class ResourceContentionError(RESTError): code = 500 - -class ResourceContentionError(AutoscalingClientError): - def __init__(self): super(ResourceContentionError, self).__init__( "ResourceContentionError", "You already have a pending update to an Auto Scaling resource (for example, a group, instance, or load balancer).") + + +class InvalidInstanceError(AutoscalingClientError): + + def __init__(self, instance_id): + super(InvalidInstanceError, self).__init__( + "ValidationError", + "Instance [{0}] is invalid."
+ .format(instance_id)) diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index ab99e4119..422075951 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -1,5 +1,10 @@ from __future__ import unicode_literals + +import random + from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping +from moto.ec2.exceptions import InvalidInstanceIdError + from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends @@ -7,7 +12,7 @@ from moto.elb import elb_backends from moto.elbv2 import elbv2_backends from moto.elb.exceptions import LoadBalancerNotFoundError from .exceptions import ( - ResourceContentionError, + AutoscalingClientError, ResourceContentionError, InvalidInstanceError ) # http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown @@ -17,10 +22,12 @@ ASG_NAME_TAG = "aws:autoscaling:groupName" class InstanceState(object): - def __init__(self, instance, lifecycle_state="InService", health_status="Healthy"): + def __init__(self, instance, lifecycle_state="InService", + health_status="Healthy", protected_from_scale_in=False): self.instance = instance self.lifecycle_state = lifecycle_state self.health_status = health_status + self.protected_from_scale_in = protected_from_scale_in class FakeScalingPolicy(BaseModel): @@ -68,6 +75,26 @@ class FakeLaunchConfiguration(BaseModel): self.associate_public_ip_address = associate_public_ip_address self.block_device_mapping_dict = block_device_mapping_dict + @classmethod + def create_from_instance(cls, name, instance, backend): + config = backend.create_launch_configuration( + name=name, + image_id=instance.image_id, + kernel_id='', + ramdisk_id='', + key_name=instance.key_name, + security_groups=instance.security_groups, + user_data=instance.user_data, + instance_type=instance.instance_type, + instance_monitoring=False, + instance_profile_name=None, + spot_price=None, + ebs_optimized=instance.ebs_optimized, + associate_public_ip_address=instance.associate_public_ip, + block_device_mappings=instance.block_device_mapping + ) + return config + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -152,17 +179,19 @@ class FakeAutoScalingGroup(BaseModel): min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, load_balancers, target_group_arns, placement_group, termination_policies, - autoscaling_backend, tags): + autoscaling_backend, tags, + new_instances_protected_from_scale_in=False): self.autoscaling_backend = autoscaling_backend self.name = name - self.availability_zones = availability_zones + + self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier) + self.max_size = max_size self.min_size = min_size self.launch_config = self.autoscaling_backend.launch_configurations[ launch_config_name] self.launch_config_name = launch_config_name - self.vpc_zone_identifier = vpc_zone_identifier self.default_cooldown = default_cooldown if default_cooldown else DEFAULT_COOLDOWN self.health_check_period = health_check_period @@ -171,11 +200,42 @@ class FakeAutoScalingGroup(BaseModel): self.target_group_arns = target_group_arns self.placement_group = placement_group self.termination_policies = termination_policies + self.new_instances_protected_from_scale_in = new_instances_protected_from_scale_in + self.suspended_processes = [] self.instance_states = [] self.tags = tags if 
tags else [] self.set_desired_capacity(desired_capacity) + def _set_azs_and_vpcs(self, availability_zones, vpc_zone_identifier, update=False): + # for updates, if only AZs are provided, they must not clash with + # the AZs of existing VPCs + if update and availability_zones and not vpc_zone_identifier: + vpc_zone_identifier = self.vpc_zone_identifier + + if vpc_zone_identifier: + # extract azs for vpcs + subnet_ids = vpc_zone_identifier.split(',') + subnets = self.autoscaling_backend.ec2_backend.get_all_subnets(subnet_ids=subnet_ids) + vpc_zones = [subnet.availability_zone for subnet in subnets] + + if availability_zones and set(availability_zones) != set(vpc_zones): + raise AutoscalingClientError( + "ValidationError", + "The availability zones of the specified subnets and the Auto Scaling group do not match", + ) + availability_zones = vpc_zones + elif not availability_zones: + if not update: + raise AutoscalingClientError( + "ValidationError", + "At least one Availability Zone or VPC Subnet is required." + ) + return + + self.availability_zones = availability_zones + self.vpc_zone_identifier = vpc_zone_identifier + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -202,6 +262,8 @@ class FakeAutoScalingGroup(BaseModel): placement_group=None, termination_policies=properties.get("TerminationPolicies", []), tags=properties.get("Tags", []), + new_instances_protected_from_scale_in=properties.get( + "NewInstancesProtectedFromScaleIn", False) ) return group @@ -230,24 +292,31 @@ class FakeAutoScalingGroup(BaseModel): def update(self, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, - placement_group, termination_policies): - if availability_zones: - self.availability_zones = availability_zones + placement_group, termination_policies, + new_instances_protected_from_scale_in=None): + self._set_azs_and_vpcs(availability_zones, vpc_zone_identifier, update=True) + if max_size is not None: self.max_size = max_size if min_size is not None: self.min_size = min_size + if desired_capacity is None: + if min_size is not None and min_size > len(self.instance_states): + desired_capacity = min_size + if max_size is not None and max_size < len(self.instance_states): + desired_capacity = max_size + if launch_config_name: self.launch_config = self.autoscaling_backend.launch_configurations[ launch_config_name] self.launch_config_name = launch_config_name - if vpc_zone_identifier is not None: - self.vpc_zone_identifier = vpc_zone_identifier if health_check_period is not None: self.health_check_period = health_check_period if health_check_type is not None: self.health_check_type = health_check_type + if new_instances_protected_from_scale_in is not None: + self.new_instances_protected_from_scale_in = new_instances_protected_from_scale_in if desired_capacity is not None: self.set_desired_capacity(desired_capacity) @@ -272,12 +341,16 @@ class FakeAutoScalingGroup(BaseModel): else: # Need to remove some instances count_to_remove = curr_instance_count - self.desired_capacity - instances_to_remove = self.instance_states[:count_to_remove] - instance_ids_to_remove = [ - instance.instance.id for instance in instances_to_remove] - self.autoscaling_backend.ec2_backend.terminate_instances( - instance_ids_to_remove) - self.instance_states = self.instance_states[count_to_remove:] + instances_to_remove = [ # only remove 
unprotected + state for state in self.instance_states + if not state.protected_from_scale_in + ][:count_to_remove] + if instances_to_remove: # guard against there being no unprotected instances to remove + instance_ids_to_remove = [ + instance.instance.id for instance in instances_to_remove] + self.autoscaling_backend.ec2_backend.terminate_instances( + instance_ids_to_remove) + self.instance_states = list(set(self.instance_states) - set(instances_to_remove)) def get_propagated_tags(self): propagated_tags = {} @@ -298,11 +371,15 @@ class FakeAutoScalingGroup(BaseModel): self.launch_config.user_data, self.launch_config.security_groups, instance_type=self.launch_config.instance_type, - tags={'instance': propagated_tags} + tags={'instance': propagated_tags}, + placement=random.choice(self.availability_zones), ) for instance in reservation.instances: instance.autoscaling_group = self - self.instance_states.append(InstanceState(instance)) + self.instance_states.append(InstanceState( + instance, + protected_from_scale_in=self.new_instances_protected_from_scale_in, + )) def append_target_groups(self, target_group_arns): append = [x for x in target_group_arns if x not in self.target_group_arns] @@ -364,7 +441,9 @@ class AutoScalingBackend(BaseBackend): default_cooldown, health_check_period, health_check_type, load_balancers, target_group_arns, placement_group, - termination_policies, tags): + termination_policies, tags, + new_instances_protected_from_scale_in=False, + instance_id=None): def make_int(value): return int(value) if value is not None else value @@ -377,6 +456,13 @@ class AutoScalingBackend(BaseBackend): health_check_period = 300 else: health_check_period = make_int(health_check_period) + if launch_config_name is None and instance_id is not None: + try: + instance = self.ec2_backend.get_instance(instance_id) + launch_config_name = name + FakeLaunchConfiguration.create_from_instance(launch_config_name, instance, self) + except InvalidInstanceIdError: + raise InvalidInstanceError(instance_id) group = FakeAutoScalingGroup( name=name, @@ -395,6 +481,7 @@ class AutoScalingBackend(BaseBackend): termination_policies=termination_policies, autoscaling_backend=self, tags=tags, + new_instances_protected_from_scale_in=new_instances_protected_from_scale_in, ) self.autoscaling_groups[name] = group @@ -407,12 +494,14 @@ class AutoScalingBackend(BaseBackend): launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, placement_group, - termination_policies): + termination_policies, + new_instances_protected_from_scale_in=None): group = self.autoscaling_groups[name] group.update(availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, - placement_group, termination_policies) + placement_group, termination_policies, + new_instances_protected_from_scale_in=new_instances_protected_from_scale_in) return group def describe_auto_scaling_groups(self, names): @@ -440,7 +529,13 @@ class AutoScalingBackend(BaseBackend): raise ResourceContentionError else: group.desired_capacity = original_size + len(instance_ids) - new_instances = [InstanceState(self.ec2_backend.get_instance(x)) for x in instance_ids] + new_instances = [ + InstanceState( + self.ec2_backend.get_instance(x), + protected_from_scale_in=group.new_instances_protected_from_scale_in, + ) + for x in instance_ids + ] for instance in new_instances: self.ec2_backend.create_tags([instance.instance.id], {ASG_NAME_TAG: group.name})
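The scale-in protection wiring above can be exercised end to end through boto3. A rough sketch, assuming the decorator-style mocks; the group name, launch configuration name and AMI id are invented for the example:

import boto3
from moto import mock_autoscaling

@mock_autoscaling
def exercise_scale_in_protection():
    client = boto3.client('autoscaling', region_name='us-east-1')
    client.create_launch_configuration(
        LaunchConfigurationName='tester',   # illustrative name
        ImageId='ami-abcd1234',             # any id is accepted by the mock
        InstanceType='t2.medium',
    )
    client.create_auto_scaling_group(
        AutoScalingGroupName='tester_group',
        LaunchConfigurationName='tester',
        AvailabilityZones=['us-east-1a'],
        MinSize=1, MaxSize=4, DesiredCapacity=2,
        NewInstancesProtectedFromScaleIn=True,  # copied onto each InstanceState
    )
    group = client.describe_auto_scaling_groups(
        AutoScalingGroupNames=['tester_group'])['AutoScalingGroups'][0]
    instance_ids = [i['InstanceId'] for i in group['Instances']]
    # SetInstanceProtection flips protected_from_scale_in per instance, so a
    # later scale-in only terminates the unprotected one.
    client.set_instance_protection(
        AutoScalingGroupName='tester_group',
        InstanceIds=instance_ids[:1],
        ProtectedFromScaleIn=False,
    )
    client.set_desired_capacity(
        AutoScalingGroupName='tester_group', DesiredCapacity=1)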
group.instance_states.extend(new_instances) @@ -614,6 +709,29 @@ class AutoScalingBackend(BaseBackend): asg_targets = [{'id': x.instance.id} for x in group.instance_states] self.elbv2_backend.deregister_targets(target_group, (asg_targets)) + def suspend_processes(self, group_name, scaling_processes): + group = self.autoscaling_groups[group_name] + group.suspended_processes = scaling_processes or [] + + def set_instance_protection(self, group_name, instance_ids, protected_from_scale_in): + group = self.autoscaling_groups[group_name] + protected_instances = [ + x for x in group.instance_states if x.instance.id in instance_ids] + for instance in protected_instances: + instance.protected_from_scale_in = protected_from_scale_in + + def notify_terminate_instances(self, instance_ids): + for autoscaling_group_name, autoscaling_group in self.autoscaling_groups.items(): + original_instance_count = len(autoscaling_group.instance_states) + autoscaling_group.instance_states = list(filter( + lambda i_state: i_state.instance.id not in instance_ids, + autoscaling_group.instance_states + )) + difference = original_instance_count - len(autoscaling_group.instance_states) + if difference > 0: + autoscaling_group.replace_autoscaling_group_instances(difference, autoscaling_group.get_propagated_tags()) + self.update_attached_elbs(autoscaling_group_name) + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index d3f9ca483..5e409aafb 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -48,7 +48,7 @@ class AutoScalingResponse(BaseResponse): start = all_names.index(marker) + 1 else: start = 0 - max_records = self._get_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier + max_records = self._get_int_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier launch_configurations_resp = all_launch_configurations[start:start + max_records] next_token = None if len(all_launch_configurations) > start + max_records: @@ -74,6 +74,7 @@ class AutoScalingResponse(BaseResponse): desired_capacity=self._get_int_param('DesiredCapacity'), max_size=self._get_int_param('MaxSize'), min_size=self._get_int_param('MinSize'), + instance_id=self._get_param('InstanceId'), launch_config_name=self._get_param('LaunchConfigurationName'), vpc_zone_identifier=self._get_param('VPCZoneIdentifier'), default_cooldown=self._get_int_param('DefaultCooldown'), @@ -85,6 +86,8 @@ class AutoScalingResponse(BaseResponse): termination_policies=self._get_multi_param( 'TerminationPolicies.member'), tags=self._get_list_prefix('Tags.member'), + new_instances_protected_from_scale_in=self._get_bool_param( + 'NewInstancesProtectedFromScaleIn', False) ) template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() @@ -166,7 +169,7 @@ class AutoScalingResponse(BaseResponse): start = all_names.index(token) + 1 else: start = 0 - max_records = self._get_param("MaxRecords", 50) + max_records = self._get_int_param("MaxRecords", 50) if max_records > 100: raise ValueError groups = all_groups[start:start + max_records] @@ -192,6 +195,8 @@ class AutoScalingResponse(BaseResponse): placement_group=self._get_param('PlacementGroup'), termination_policies=self._get_multi_param( 'TerminationPolicies.member'), + new_instances_protected_from_scale_in=self._get_bool_param( + 'NewInstancesProtectedFromScaleIn', None) ) template = 
self.response_template(UPDATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() @@ -283,6 +288,22 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE) return template.render() + def suspend_processes(self): + autoscaling_group_name = self._get_param('AutoScalingGroupName') + scaling_processes = self._get_multi_param('ScalingProcesses.member') + self.autoscaling_backend.suspend_processes(autoscaling_group_name, scaling_processes) + template = self.response_template(SUSPEND_PROCESSES_TEMPLATE) + return template.render() + + def set_instance_protection(self): + group_name = self._get_param('AutoScalingGroupName') + instance_ids = self._get_multi_param('InstanceIds.member') + protected_from_scale_in = self._get_bool_param('ProtectedFromScaleIn') + self.autoscaling_backend.set_instance_protection( + group_name, instance_ids, protected_from_scale_in) + template = self.response_template(SET_INSTANCE_PROTECTION_TEMPLATE) + return template.render() + CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """ @@ -313,8 +334,7 @@ DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE = """ {% endif %} {{ launch_configuration.instance_type }} - arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration: - 9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/{{ launch_configuration.name }} + arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration:9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/{{ launch_configuration.name }} {% if launch_configuration.block_device_mappings %} {% for mount_point, mapping in launch_configuration.block_device_mappings.items() %} @@ -385,7 +405,7 @@ ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """ -{{ requestid }} + """ @@ -393,7 +413,7 @@ ATTACH_INSTANCES_TEMPLATE = """ -{{ requestid }} + """ @@ -435,7 +455,7 @@ DETACH_INSTANCES_TEMPLATE = """ -{{ requestid }} + """ @@ -463,7 +483,14 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {% endfor %} - + + {% for suspended_process in group.suspended_processes %} + + {{suspended_process}} + + + {% endfor %} + {{ group.name }} {{ group.health_check_type }} 2013-05-06T17:47:15.107Z @@ -473,10 +500,11 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {{ instance_state.health_status }} - us-east-1e + {{ instance_state.instance.placement }} {{ instance_state.instance.id }} {{ group.launch_config_name }} {{ instance_state.lifecycle_state }} + {{ instance_state.protected_from_scale_in|string|lower }} {% endfor %} @@ -495,6 +523,15 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {% endif %} + {% if group.target_group_arns %} + + {% for target_group_arn in group.target_group_arns %} + {{ target_group_arn }} + {% endfor %} + + {% else %} + + {% endif %} {{ group.min_size }} {% if group.vpc_zone_identifier %} {{ group.vpc_zone_identifier }} @@ -503,8 +540,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """{{ group.health_check_period }} {{ group.default_cooldown }} - arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb - :autoScalingGroupName/{{ group.name }} + arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb:autoScalingGroupName/{{ group.name }} {% if group.termination_policies %} {% for policy in group.termination_policies %} @@ -518,6 +554,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """{{ group.placement_group }} {% endif %} + {{ group.new_instances_protected_from_scale_in|string|lower }} {% endfor %} @@ -549,10 +586,11 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """ {{ 
instance_state.health_status }} {{ instance_state.instance.autoscaling_group.name }} - us-east-1e + {{ instance_state.instance.placement }} {{ instance_state.instance.id }} {{ instance_state.instance.autoscaling_group.launch_config_name }} {{ instance_state.lifecycle_state }} + {{ instance_state.protected_from_scale_in|string|lower }} {% endfor %} @@ -617,7 +655,7 @@ DELETE_POLICY_TEMPLATE = """ -{{ requestid }} + """ @@ -633,20 +671,33 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ -{{ requestid }} + """ +SUSPEND_PROCESSES_TEMPLATE = """ + + 7c6e177f-f082-11e1-ac58-3714bEXAMPLE + +""" + SET_INSTANCE_HEALTH_TEMPLATE = """ -{{ requestid }} + """ + +SET_INSTANCE_PROTECTION_TEMPLATE = """ + + + + +""" diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 6306acd5c..784d86b0b 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -2,7 +2,9 @@ from __future__ import unicode_literals import base64 from collections import defaultdict +import copy import datetime +import docker import docker.errors import hashlib import io @@ -17,18 +19,23 @@ import tarfile import calendar import threading import traceback +import weakref import requests.adapters import boto.awslambda from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import RESTError from moto.core.utils import unix_time_millis from moto.s3.models import s3_backend from moto.logs.models import logs_backends from moto.s3.exceptions import MissingBucket, MissingKey from moto import settings +from .utils import make_function_arn, make_function_ver_arn logger = logging.getLogger(__name__) +ACCOUNT_ID = '123456789012' + try: from tempfile import TemporaryDirectory @@ -38,6 +45,7 @@ except ImportError: _stderr_regex = re.compile(r'START|END|REPORT RequestId: .*') _orig_adapter_send = requests.adapters.HTTPAdapter.send +docker_3 = docker.__version__[0] >= '3' def zip2tar(zip_bytes): @@ -98,7 +106,11 @@ class _DockerDataVolumeContext: # It doesn't exist so we need to create it self._vol_ref.volume = self._lambda_func.docker_client.volumes.create(self._lambda_func.code_sha_256) - container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes={self.name: '/tmp/data'}, detach=True) + if docker_3: + volumes = {self.name: {'bind': '/tmp/data', 'mode': 'rw'}} + else: + volumes = {self.name: '/tmp/data'} + container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes=volumes, detach=True) try: tar_bytes = zip2tar(self._lambda_func.code_bytes) container.put_archive('/tmp/data', tar_bytes) @@ -121,7 +133,7 @@ class _DockerDataVolumeContext: class LambdaFunction(BaseModel): - def __init__(self, spec, region, validate_s3=True): + def __init__(self, spec, region, validate_s3=True, version=1): # required self.region = region self.code = spec['Code'] @@ -161,7 +173,7 @@ class LambdaFunction(BaseModel): 'VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []}) # auto-generated - self.version = '$LATEST' + self.version = version self.last_modified = datetime.datetime.utcnow().strftime( '%Y-%m-%d %H:%M:%S') @@ -203,11 +215,15 @@ class LambdaFunction(BaseModel): self.code_size = key.size self.code_sha_256 = hashlib.sha256(key.value).hexdigest() - self.function_arn = 'arn:aws:lambda:{}:123456789012:function:{}'.format( - self.region, self.function_name) + self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name) self.tags = dict() + def set_version(self, version): + self.function_arn = make_function_ver_arn(self.region, ACCOUNT_ID, 
self.function_name, version) + self.version = version + self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + @property def vpc_config(self): config = self._vpc_config.copy() @@ -215,6 +231,10 @@ class LambdaFunction(BaseModel): config.update({"VpcId": "vpc-123abc"}) return config + @property + def physical_resource_id(self): + return self.function_name + def __repr__(self): return json.dumps(self.get_configuration()) @@ -231,7 +251,7 @@ class LambdaFunction(BaseModel): "Role": self.role, "Runtime": self.run_time, "Timeout": self.timeout, - "Version": self.version, + "Version": str(self.version), "VpcConfig": self.vpc_config, } @@ -255,14 +275,14 @@ class LambdaFunction(BaseModel): def convert(s): try: return str(s, encoding='utf-8') - except: + except Exception: return s @staticmethod def is_json(test_str): try: response = json.loads(test_str) - except: + except Exception: response = test_str return response @@ -304,6 +324,10 @@ class LambdaFunction(BaseModel): exit_code = -1 container.stop() container.kill() + else: + if docker_3: + exit_code = exit_code['StatusCode'] + output = container.logs(stdout=False, stderr=True) output += container.logs(stdout=True, stderr=False) container.remove() @@ -366,7 +390,7 @@ class LambdaFunction(BaseModel): 'Role': properties['Role'], 'Runtime': properties['Runtime'], } - optional_properties = 'Description MemorySize Publish Timeout VpcConfig'.split() + optional_properties = 'Description MemorySize Publish Timeout VpcConfig Environment'.split() # NOTE: Not doing `properties.get(k, DEFAULT)` to avoid duplicating the # default logic for prop in optional_properties: @@ -389,8 +413,7 @@ class LambdaFunction(BaseModel): from moto.cloudformation.exceptions import \ UnformattedGetAttTemplateException if attribute_name == 'Arn': - return 'arn:aws:lambda:{0}:123456789012:function:{1}'.format( - self.region, self.function_name) + return make_function_arn(self.region, ACCOUNT_ID, self.function_name) raise UnformattedGetAttTemplateException() @staticmethod @@ -436,6 +459,9 @@ class LambdaVersion(BaseModel): def __init__(self, spec): self.version = spec['Version'] + def __repr__(self): + return str(self.logical_resource_id) + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): @@ -446,9 +472,130 @@ class LambdaVersion(BaseModel): return LambdaVersion(spec) +class LambdaStorage(object): + def __init__(self): + # Format 'func_name' {'alias': {}, 'versions': []} + self._functions = {} + self._arns = weakref.WeakValueDictionary() + + def _get_latest(self, name): + return self._functions[name]['latest'] + + def _get_version(self, name, version): + index = version - 1 + + try: + return self._functions[name]['versions'][index] + except IndexError: + return None + + def _get_alias(self, name, alias): + return self._functions[name]['alias'].get(alias, None) + + def get_function(self, name, qualifier=None): + if name not in self._functions: + return None + + if qualifier is None: + return self._get_latest(name) + + try: + return self._get_version(name, int(qualifier)) + except ValueError: + return self._functions[name]['latest'] + + def list_versions_by_function(self, name): + if name not in self._functions: + return None + + latest = copy.copy(self._functions[name]['latest']) + latest.function_arn += ':$LATEST' + return [latest] + self._functions[name]['versions'] + + def get_arn(self, arn): + return self._arns.get(arn, None) + + def put_function(self, fn): + """ + :param fn: Function + :type fn: 
LambdaFunction + """ + if fn.function_name in self._functions: + self._functions[fn.function_name]['latest'] = fn + else: + self._functions[fn.function_name] = { + 'latest': fn, + 'versions': [], + 'alias': weakref.WeakValueDictionary() + } + + self._arns[fn.function_arn] = fn + + def publish_function(self, name): + if name not in self._functions: + return None + if not self._functions[name]['latest']: + return None + + new_version = len(self._functions[name]['versions']) + 1 + fn = copy.copy(self._functions[name]['latest']) + fn.set_version(new_version) + + self._functions[name]['versions'].append(fn) + self._arns[fn.function_arn] = fn + return fn + + def del_function(self, name, qualifier=None): + if name in self._functions: + if not qualifier: + # Something may still be referencing these, so delete all ARNs + latest = self._functions[name]['latest'].function_arn + del self._arns[latest] + + for fn in self._functions[name]['versions']: + del self._arns[fn.function_arn] + + del self._functions[name] + + return True + + elif qualifier == '$LATEST': + self._functions[name]['latest'] = None + + # If there are no functions left + if not self._functions[name]['versions'] and not self._functions[name]['latest']: + del self._functions[name] + + return True + + else: + fn = self.get_function(name, qualifier) + if fn: + self._functions[name]['versions'].remove(fn) + + # If there are no functions left + if not self._functions[name]['versions'] and not self._functions[name]['latest']: + del self._functions[name] + + return True + + return False + + def all(self): + result = [] + + for function_group in self._functions.values(): + if function_group['latest'] is not None: + result.append(function_group['latest']) + + result.extend(function_group['versions']) + + return result + + class LambdaBackend(BaseBackend): def __init__(self, region_name): - self._functions = {} + self._lambdas = LambdaStorage() self.region_name = region_name def reset(self): @@ -456,33 +603,39 @@ class LambdaBackend(BaseBackend): self.__dict__ = {} self.__init__(region_name) - def has_function(self, function_name): - return function_name in self._functions - - def has_function_arn(self, function_arn): - return self.get_function_by_arn(function_arn) is not None - def create_function(self, spec): - fn = LambdaFunction(spec, self.region_name) - self._functions[fn.function_name] = fn + function_name = spec.get('FunctionName', None) + if function_name is None: + raise RESTError('InvalidParameterValueException', 'Missing FunctionName') + + fn = LambdaFunction(spec, self.region_name, version='$LATEST') + + self._lambdas.put_function(fn) + + if spec.get('Publish'): + ver = self.publish_function(function_name) + fn.version = ver.version return fn - def get_function(self, function_name): - return self._functions[function_name] + def publish_function(self, function_name): + return self._lambdas.publish_function(function_name) + + def get_function(self, function_name, qualifier=None): + return self._lambdas.get_function(function_name, qualifier) + + def list_versions_by_function(self, function_name): + return self._lambdas.list_versions_by_function(function_name) def get_function_by_arn(self, function_arn): - for function in self._functions.values(): - if function.function_arn == function_arn: - return function - return None + return self._lambdas.get_arn(function_arn) - def delete_function(self, function_name): - del self._functions[function_name] + def delete_function(self, function_name, qualifier=None): + return
self._lambdas.del_function(function_name, qualifier) def list_functions(self): - return self._functions.values() + return self._lambdas.all() - def send_message(self, function_name, message): + def send_message(self, function_name, message, subject=None, qualifier=None): event = { "Records": [ { @@ -509,29 +662,37 @@ class LambdaBackend(BaseBackend): "Type": "Notification", "UnsubscribeUrl": "EXAMPLE", "TopicArn": "arn:aws:sns:EXAMPLE", - "Subject": "TestInvoke" + "Subject": subject or "TestInvoke" } } ] } - self._functions[function_name].invoke(json.dumps(event), {}, {}) - pass + func = self._lambdas.get_function(function_name, qualifier) + func.invoke(json.dumps(event), {}, {}) def list_tags(self, resource): return self.get_function_by_arn(resource).tags def tag_resource(self, resource, tags): - self.get_function_by_arn(resource).tags.update(tags) + fn = self.get_function_by_arn(resource) + if not fn: + return False + + fn.tags.update(tags) + return True def untag_resource(self, resource, tagKeys): - function = self.get_function_by_arn(resource) - for key in tagKeys: - try: - del function.tags[key] - except KeyError: - pass - # Don't care + fn = self.get_function_by_arn(resource) + if fn: + for key in tagKeys: + try: + del fn.tags[key] + except KeyError: + pass + # Don't care + return True + return False def add_policy(self, function_name, policy): self.get_function(function_name).policy = policy @@ -546,3 +707,4 @@ lambda_backends = {_region.name: LambdaBackend(_region.name) for _region in boto.awslambda.regions()} lambda_backends['ap-southeast-2'] = LambdaBackend('ap-southeast-2') +lambda_backends['us-gov-west-1'] = LambdaBackend('us-gov-west-1') diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 4ba837ea2..c29c9acd9 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -1,19 +1,34 @@ from __future__ import unicode_literals import json -import re try: from urllib import unquote - from urlparse import urlparse, parse_qs -except: - from urllib.parse import unquote, urlparse, parse_qs +except ImportError: + from urllib.parse import unquote -from moto.core.utils import amz_crc32, amzn_request_id +from moto.core.utils import amz_crc32, amzn_request_id, path_url from moto.core.responses import BaseResponse +from .models import lambda_backends class LambdaResponse(BaseResponse): + @property + def json_body(self): + """ + :return: JSON + :rtype: dict + """ + return json.loads(self.body) + + @property + def lambda_backend(self): + """ + Get backend + :return: Lambda Backend + :rtype: moto.awslambda.models.LambdaBackend + """ + return lambda_backends[self.region] def root(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -33,6 +48,20 @@ class LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle request") + def versions(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + if request.method == 'GET': + # This is ListVersionByFunction + + path = request.path if hasattr(request, 'path') else path_url(request.url) + function_name = path.split('/')[-2] + return self._list_versions_by_function(function_name) + + elif request.method == 'POST': + return self._publish_function(request, full_url, headers) + else: + raise ValueError("Cannot handle request") + @amz_crc32 @amzn_request_id def invoke(self, request, full_url, headers): @@ -69,37 +98,32 @@ class LambdaResponse(BaseResponse): return self._add_policy(request, full_url, headers) def _add_policy(self, request, full_url, 
headers): - lambda_backend = self.get_lambda_backend(full_url) - - path = request.path if hasattr(request, 'path') else request.path_url + path = request.path if hasattr(request, 'path') else path_url(request.url) function_name = path.split('/')[-2] - if lambda_backend.has_function(function_name): + if self.lambda_backend.get_function(function_name): policy = request.body.decode('utf8') - lambda_backend.add_policy(function_name, policy) + self.lambda_backend.add_policy(function_name, policy) return 200, {}, json.dumps(dict(Statement=policy)) else: return 404, {}, "{}" def _get_policy(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) - - path = request.path if hasattr(request, 'path') else request.path_url + path = request.path if hasattr(request, 'path') else path_url(request.url) function_name = path.split('/')[-2] - if lambda_backend.has_function(function_name): - function = lambda_backend.get_function(function_name) - return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + function.policy + "]}")) + if self.lambda_backend.get_function(function_name): + lambda_function = self.lambda_backend.get_function(function_name) + return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + lambda_function.policy + "]}")) else: return 404, {}, "{}" def _invoke(self, request, full_url): response_headers = {} - lambda_backend = self.get_lambda_backend(full_url) - path = request.path if hasattr(request, 'path') else request.path_url - function_name = path.split('/')[-2] + function_name = self.path.rsplit('/', 2)[-2] + qualifier = self._get_param('qualifier') - if lambda_backend.has_function(function_name): - fn = lambda_backend.get_function(function_name) + fn = self.lambda_backend.get_function(function_name, qualifier) + if fn: payload = fn.invoke(self.body, self.headers, response_headers) response_headers['Content-Length'] = str(len(payload)) return 202, response_headers, payload @@ -108,109 +132,115 @@ class LambdaResponse(BaseResponse): def _invoke_async(self, request, full_url): response_headers = {} - lambda_backend = self.get_lambda_backend(full_url) - path = request.path if hasattr(request, 'path') else request.path_url - function_name = path.split('/')[-3] - if lambda_backend.has_function(function_name): - fn = lambda_backend.get_function(function_name) - fn.invoke(self.body, self.headers, response_headers) - response_headers['Content-Length'] = str(0) - return 202, response_headers, "" + function_name = self.path.rsplit('/', 3)[-3] + + fn = self.lambda_backend.get_function(function_name, None) + if fn: + payload = fn.invoke(self.body, self.headers, response_headers) + response_headers['Content-Length'] = str(len(payload)) + return 202, response_headers, payload else: return 404, response_headers, "{}" def _list_functions(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) - return 200, {}, json.dumps({ - "Functions": [fn.get_configuration() for fn in lambda_backend.list_functions()], - # "NextMarker": str(uuid.uuid4()), - }) + result = { + 'Functions': [] + } + + for fn in self.lambda_backend.list_functions(): + json_data = fn.get_configuration() + json_data['Version'] = '$LATEST' + result['Functions'].append(json_data) + + return 200, {}, json.dumps(result) + + def _list_versions_by_function(self, function_name): + result = { + 'Versions': [] + } + + functions = self.lambda_backend.list_versions_by_function(function_name) + if functions: + for fn in functions: + json_data = fn.get_configuration() + 
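The handler above, together with LambdaStorage.publish_function(), gives the mock a full version lifecycle. A minimal sketch of the round trip through boto3 (function name, role ARN and handler are placeholders; no Docker is involved until the function is actually invoked):

import io
import zipfile

import boto3
from moto import mock_lambda

def zipped_stub():
    # The Lambda API expects the code as a zip blob.
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w') as zf:
        zf.writestr('lambda_function.py',
                    'def lambda_handler(event, context):\n    return event\n')
    return buf.getvalue()

@mock_lambda
def exercise_versioning():
    client = boto3.client('lambda', region_name='us-west-2')
    client.create_function(
        FunctionName='testFunction',
        Runtime='python2.7',
        Role='arn:aws:iam::123456789012:role/test-role',  # placeholder role
        Handler='lambda_function.lambda_handler',
        Code={'ZipFile': zipped_stub()},
        Publish=True,   # create_function() then publishes version 1
    )
    client.publish_version(FunctionName='testFunction')   # version 2
    versions = client.list_versions_by_function(
        FunctionName='testFunction')['Versions']
    # A copy of $LATEST is prepended ahead of the numbered versions.
    assert versions[0]['FunctionArn'].endswith(':$LATEST')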
result['Versions'].append(json_data) + + return 200, {}, json.dumps(result) def _create_function(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) - spec = json.loads(self.body) try: - fn = lambda_backend.create_function(spec) + fn = self.lambda_backend.create_function(self.json_body) except ValueError as e: return 400, {}, json.dumps({"Error": {"Code": e.args[0], "Message": e.args[1]}}) else: config = fn.get_configuration() return 201, {}, json.dumps(config) + def _publish_function(self, request, full_url, headers): + function_name = self.path.rsplit('/', 2)[-2] + + fn = self.lambda_backend.publish_function(function_name) + if fn: + config = fn.get_configuration() + return 201, {}, json.dumps(config) + else: + return 404, {}, "{}" + def _delete_function(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) + function_name = self.path.rsplit('/', 1)[-1] + qualifier = self._get_param('Qualifier', None) - path = request.path if hasattr(request, 'path') else request.path_url - function_name = path.split('/')[-1] - - if lambda_backend.has_function(function_name): - lambda_backend.delete_function(function_name) + if self.lambda_backend.delete_function(function_name, qualifier): return 204, {}, "" else: return 404, {}, "{}" def _get_function(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) + function_name = self.path.rsplit('/', 1)[-1] + qualifier = self._get_param('Qualifier', None) - path = request.path if hasattr(request, 'path') else request.path_url - function_name = path.split('/')[-1] + fn = self.lambda_backend.get_function(function_name, qualifier) - if lambda_backend.has_function(function_name): - fn = lambda_backend.get_function(function_name) + if fn: code = fn.get_code() + if qualifier is None or qualifier == '$LATEST': + code['Configuration']['Version'] = '$LATEST' + if qualifier == '$LATEST': + code['Configuration']['FunctionArn'] += ':$LATEST' return 200, {}, json.dumps(code) else: return 404, {}, "{}" - def get_lambda_backend(self, full_url): - from moto.awslambda.models import lambda_backends - region = self._get_aws_region(full_url) - return lambda_backends[region] - def _get_aws_region(self, full_url): - region = re.search(self.region_regex, full_url) + region = self.region_regex.search(full_url) if region: return region.group(1) else: return self.default_region def _list_tags(self, request, full_url): - lambda_backend = self.get_lambda_backend(full_url) + function_arn = unquote(self.path.rsplit('/', 1)[-1]) - path = request.path if hasattr(request, 'path') else request.path_url - function_arn = unquote(path.split('/')[-1]) - - if lambda_backend.has_function_arn(function_arn): - function = lambda_backend.get_function_by_arn(function_arn) - return 200, {}, json.dumps(dict(Tags=function.tags)) + fn = self.lambda_backend.get_function_by_arn(function_arn) + if fn: + return 200, {}, json.dumps({'Tags': fn.tags}) else: return 404, {}, "{}" def _tag_resource(self, request, full_url): - lambda_backend = self.get_lambda_backend(full_url) + function_arn = unquote(self.path.rsplit('/', 1)[-1]) - path = request.path if hasattr(request, 'path') else request.path_url - function_arn = unquote(path.split('/')[-1]) - - spec = json.loads(self.body) - - if lambda_backend.has_function_arn(function_arn): - lambda_backend.tag_resource(function_arn, spec['Tags']) + if self.lambda_backend.tag_resource(function_arn, self.json_body['Tags']): return 200, {}, "{}" else: return 404, {}, "{}" def 
_untag_resource(self, request, full_url): - lambda_backend = self.get_lambda_backend(full_url) + function_arn = unquote(self.path.rsplit('/', 1)[-1]) + tag_keys = self.querystring['tagKeys'] - path = request.path if hasattr(request, 'path') else request.path_url - function_arn = unquote(path.split('/')[-1].split('?')[0]) - - tag_keys = parse_qs(urlparse(full_url).query)['tagKeys'] - - if lambda_backend.has_function_arn(function_arn): - lambda_backend.untag_resource(function_arn, tag_keys) + if self.lambda_backend.untag_resource(function_arn, tag_keys): return 204, {}, "{}" else: return 404, {}, "{}" diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index 005785f19..7c4d064dc 100644 --- a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -10,6 +10,7 @@ response = LambdaResponse() url_paths = { '{0}/(?P<api_version>[^/]+)/functions/?$': response.root, r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/?$': response.function, + r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/versions/?$': response.versions, r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invocations/?$': response.invoke, r'{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invoke-async/?$': response.invoke_async, r'{0}/(?P<api_version>[^/]+)/tags/(?P<resource_arn>.+)': response.tag, diff --git a/moto/awslambda/utils.py b/moto/awslambda/utils.py new file mode 100644 index 000000000..82027cb2f --- /dev/null +++ b/moto/awslambda/utils.py @@ -0,0 +1,20 @@ +from collections import namedtuple + +ARN = namedtuple('ARN', ['region', 'account', 'function_name', 'version']) + + +def make_function_arn(region, account, name): + return 'arn:aws:lambda:{0}:{1}:function:{2}'.format(region, account, name) + + +def make_function_ver_arn(region, account, name, version='1'): + arn = make_function_arn(region, account, name) + return '{0}:{1}'.format(arn, version) + + +def split_function_arn(arn): + arn = arn.replace('arn:aws:lambda:', '') + + region, account, _, name, version = arn.split(':') + + return ARN(region, account, name, version) diff --git a/moto/backends.py b/moto/backends.py index 771cd4018..6ea85093d 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -6,10 +6,13 @@ from moto.autoscaling import autoscaling_backends from moto.awslambda import lambda_backends from moto.cloudformation import cloudformation_backends from moto.cloudwatch import cloudwatch_backends +from moto.cognitoidentity import cognitoidentity_backends +from moto.cognitoidp import cognitoidp_backends from moto.core import moto_api_backends from moto.datapipeline import datapipeline_backends from moto.dynamodb import dynamodb_backends from moto.dynamodb2 import dynamodb_backends2 +from moto.dynamodbstreams import dynamodbstreams_backends from moto.ec2 import ec2_backends from moto.ecr import ecr_backends from moto.ecs import ecs_backends @@ -18,27 +21,33 @@ from moto.elbv2 import elbv2_backends from moto.emr import emr_backends from moto.events import events_backends from moto.glacier import glacier_backends +from moto.glue import glue_backends from moto.iam import iam_backends from moto.instance_metadata import instance_metadata_backends from moto.kinesis import kinesis_backends from moto.kms import kms_backends from moto.logs import logs_backends from moto.opsworks import opsworks_backends +from moto.organizations import organizations_backends from moto.polly import polly_backends from moto.rds2 import rds2_backends from moto.redshift import redshift_backends +from moto.resourcegroups import resourcegroups_backends from moto.route53 import route53_backends from moto.s3 import s3_backends from moto.ses import ses_backends +from
moto.secretsmanager import secretsmanager_backends from moto.sns import sns_backends from moto.sqs import sqs_backends from moto.ssm import ssm_backends from moto.sts import sts_backends +from moto.swf import swf_backends from moto.xray import xray_backends from moto.iot import iot_backends from moto.iotdata import iotdata_backends from moto.batch import batch_backends - +from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends +from moto.config import config_backends BACKENDS = { 'acm': acm_backends, @@ -47,9 +56,13 @@ BACKENDS = { 'batch': batch_backends, 'cloudformation': cloudformation_backends, 'cloudwatch': cloudwatch_backends, + 'cognito-identity': cognitoidentity_backends, + 'cognito-idp': cognitoidp_backends, + 'config': config_backends, 'datapipeline': datapipeline_backends, 'dynamodb': dynamodb_backends, 'dynamodb2': dynamodb_backends2, + 'dynamodbstreams': dynamodbstreams_backends, 'ec2': ec2_backends, 'ecr': ecr_backends, 'ecs': ecs_backends, @@ -58,6 +71,7 @@ BACKENDS = { 'events': events_backends, 'emr': emr_backends, 'glacier': glacier_backends, + 'glue': glue_backends, 'iam': iam_backends, 'moto_api': moto_api_backends, 'instance_metadata': instance_metadata_backends, @@ -65,19 +79,24 @@ BACKENDS = { 'kinesis': kinesis_backends, 'kms': kms_backends, 'opsworks': opsworks_backends, + 'organizations': organizations_backends, 'polly': polly_backends, 'redshift': redshift_backends, + 'resource-groups': resourcegroups_backends, 'rds': rds2_backends, 's3': s3_backends, 's3bucket_path': s3_backends, 'ses': ses_backends, + 'secretsmanager': secretsmanager_backends, 'sns': sns_backends, 'sqs': sqs_backends, 'ssm': ssm_backends, 'sts': sts_backends, + 'swf': swf_backends, 'route53': route53_backends, 'lambda': lambda_backends, 'xray': xray_backends, + 'resourcegroupstaggingapi': resourcegroupstaggingapi_backends, 'iot': iot_backends, 'iot-data': iotdata_backends, } diff --git a/moto/batch/models.py b/moto/batch/models.py index 8b3b81ccb..c47ca6e97 100644 --- a/moto/batch/models.py +++ b/moto/batch/models.py @@ -295,6 +295,14 @@ class Job(threading.Thread, BaseModel): } if self.job_stopped: result['stoppedAt'] = datetime2int(self.job_stopped_at) + result['container'] = {} + result['container']['command'] = ['/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"'] + result['container']['privileged'] = False + result['container']['readonlyRootFilesystem'] = False + result['container']['ulimits'] = {} + result['container']['vcpus'] = 1 + result['container']['volumes'] = '' + result['container']['logStreamName'] = self.log_stream_name if self.job_stopped_reason is not None: result['statusReason'] = self.job_stopped_reason return result @@ -378,6 +386,7 @@ class Job(threading.Thread, BaseModel): # Send to cloudwatch log_group = '/aws/batch/job' stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id) + self.log_stream_name = stream_name self._log_backend.ensure_log_group(log_group, None) self._log_backend.create_log_stream(log_group, stream_name) self._log_backend.put_log_events(log_group, stream_name, logs, None) diff --git a/moto/batch/responses.py b/moto/batch/responses.py index e626b7d4c..7fb606184 100644 --- a/moto/batch/responses.py +++ b/moto/batch/responses.py @@ -27,7 +27,7 @@ class BatchResponse(BaseResponse): elif not hasattr(self, '_json'): try: self._json = json.loads(self.body) - except json.JSONDecodeError: + except ValueError: print() return self._json diff --git a/moto/cloudformation/exceptions.py 
b/moto/cloudformation/exceptions.py index 56a95382a..6ea15c5ca 100644 --- a/moto/cloudformation/exceptions.py +++ b/moto/cloudformation/exceptions.py @@ -33,6 +33,18 @@ class MissingParameterError(BadRequest): ) +class ExportNotFound(BadRequest): + """Exception to raise if a template tries to import a non-existent export""" + + def __init__(self, export_name): + template = Template(ERROR_RESPONSE) + super(ExportNotFound, self).__init__() + self.description = template.render( + code='ExportNotFound', + message="No export named {0} found.".format(export_name) + ) + + ERROR_RESPONSE = """ Sender diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index e579e4c08..01e3113dd 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -1,5 +1,5 @@ from __future__ import unicode_literals -from datetime import datetime +from datetime import datetime, timedelta import json import yaml import uuid @@ -9,13 +9,162 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from .parsing import ResourceMap, OutputMap -from .utils import generate_stack_id, yaml_tag_constructor +from .utils import ( + generate_changeset_id, + generate_stack_id, + generate_stackset_arn, + generate_stackset_id, + yaml_tag_constructor, + validate_template_cfn_lint, +) from .exceptions import ValidationError +class FakeStackSet(BaseModel): + + def __init__(self, stackset_id, name, template, region='us-east-1', + status='ACTIVE', description=None, parameters=None, tags=None, + admin_role='AWSCloudFormationStackSetAdministrationRole', + execution_role='AWSCloudFormationStackSetExecutionRole'): + self.id = stackset_id + self.arn = generate_stackset_arn(stackset_id, region) + self.name = name + self.template = template + self.description = description + self.parameters = parameters + self.tags = tags + self.admin_role = admin_role + self.execution_role = execution_role + self.status = status + self.instances = FakeStackInstances(parameters, self.id, self.name) + self.stack_instances = self.instances.stack_instances + self.operations = [] + + def _create_operation(self, operation_id, action, status, accounts=[], regions=[]): + operation = { + 'OperationId': str(operation_id), + 'Action': action, + 'Status': status, + 'CreationTimestamp': datetime.now(), + 'EndTimestamp': datetime.now() + timedelta(minutes=2), + 'Instances': [{account: region} for account in accounts for region in regions], + } + + self.operations += [operation] + return operation + + def get_operation(self, operation_id): + for operation in self.operations: + if operation_id == operation['OperationId']: + return operation + raise ValidationError(operation_id) + + def update_operation(self, operation_id, status): + operation = self.get_operation(operation_id) + operation['Status'] = status + return operation_id + + def delete(self): + self.status = 'DELETED' + + def update(self, template, description, parameters, tags, admin_role, + execution_role, accounts, regions, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + + self.template = template if template else self.template + self.description = description if description is not None else self.description + self.parameters = parameters if parameters else self.parameters + self.tags = tags if tags else self.tags + self.admin_role = admin_role if admin_role else self.admin_role + self.execution_role = execution_role if execution_role else self.execution_role + + if accounts and regions: + self.update_instances(accounts, 
regions, self.parameters) + + operation = self._create_operation(operation_id=operation_id, + action='UPDATE', status='SUCCEEDED', accounts=accounts, + regions=regions) + return operation + + def create_stack_instances(self, accounts, regions, parameters, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + if not parameters: + parameters = self.parameters + + self.instances.create_instances(accounts, regions, parameters, operation_id) + self._create_operation(operation_id=operation_id, action='CREATE', + status='SUCCEEDED', accounts=accounts, regions=regions) + + def delete_stack_instances(self, accounts, regions, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + + self.instances.delete(accounts, regions) + + operation = self._create_operation(operation_id=operation_id, action='DELETE', + status='SUCCEEDED', accounts=accounts, regions=regions) + return operation + + def update_instances(self, accounts, regions, parameters, operation_id=None): + if not operation_id: + operation_id = uuid.uuid4() + + self.instances.update(accounts, regions, parameters) + operation = self._create_operation(operation_id=operation_id, + action='UPDATE', status='SUCCEEDED', accounts=accounts, + regions=regions) + return operation + + +class FakeStackInstances(BaseModel): + def __init__(self, parameters, stackset_id, stackset_name): + self.parameters = parameters if parameters else {} + self.stackset_id = stackset_id + self.stack_name = "StackSet-{}".format(stackset_id) + self.stackset_name = stackset_name + self.stack_instances = [] + + def create_instances(self, accounts, regions, parameters, operation_id): + new_instances = [] + for region in regions: + for account in accounts: + instance = { + 'StackId': generate_stack_id(self.stack_name, region, account), + 'StackSetId': self.stackset_id, + 'Region': region, + 'Account': account, + 'Status': "CURRENT", + 'ParameterOverrides': parameters if parameters else [], + } + new_instances.append(instance) + self.stack_instances += new_instances + return new_instances + + def update(self, accounts, regions, parameters): + for account in accounts: + for region in regions: + instance = self.get_instance(account, region) + if parameters: + instance['ParameterOverrides'] = parameters + else: + instance['ParameterOverrides'] = [] + + def delete(self, accounts, regions): + # rebuild the list rather than pop()-ing while iterating, which skips entries + self.stack_instances = [ + instance for instance in self.stack_instances + if not (instance['Region'] in regions and instance['Account'] in accounts) + ] + + def get_instance(self, account, region): + for i, instance in enumerate(self.stack_instances): + if instance['Region'] == region and instance['Account'] == account: + return self.stack_instances[i] + + class FakeStack(BaseModel): - def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None): + def __init__(self, stack_id, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None, create_change_set=False): self.stack_id = stack_id self.name = name self.template = template @@ -26,11 +175,15 @@ class FakeStack(BaseModel): self.role_arn = role_arn self.tags = tags if tags else {} self.events = [] - self._add_stack_event("CREATE_IN_PROGRESS", - resource_status_reason="User Initiated") + if create_change_set: + self._add_stack_event("REVIEW_IN_PROGRESS", + resource_status_reason="User Initiated") + else: + self._add_stack_event("CREATE_IN_PROGRESS", +
resource_status_reason="User Initiated") self.description = self.template_dict.get('Description') - self.cross_stack_resources = cross_stack_resources or [] + self.cross_stack_resources = cross_stack_resources or {} self.resource_map = self._create_resource_map() self.output_map = self._create_output_map() self._add_stack_event("CREATE_COMPLETE") @@ -76,9 +229,9 @@ class FakeStack(BaseModel): def _parse_template(self): yaml.add_multi_constructor('', yaml_tag_constructor) try: - self.template_dict = yaml.load(self.template) + self.template_dict = yaml.load(self.template, Loader=yaml.Loader) except yaml.parser.ParserError: self.template_dict = json.loads(self.template) @property def stack_parameters(self): @@ -99,7 +252,8 @@ class FakeStack(BaseModel): def update(self, template, role_arn=None, parameters=None, tags=None): self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated") self.template = template - self.resource_map.update(json.loads(template), parameters) + self._parse_template() + self.resource_map.update(self.template_dict, parameters) self.output_map = self._create_output_map() self._add_stack_event("UPDATE_COMPLETE") self.status = "UPDATE_COMPLETE" @@ -117,6 +271,49 @@ self.status = "DELETE_COMPLETE" +class FakeChange(BaseModel): + + def __init__(self, action, logical_resource_id, resource_type): + self.action = action + self.logical_resource_id = logical_resource_id + self.resource_type = resource_type + + +class FakeChangeSet(FakeStack): + + def __init__(self, stack_id, stack_name, stack_template, change_set_id, change_set_name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, cross_stack_resources=None): + super(FakeChangeSet, self).__init__( + stack_id, + stack_name, + stack_template, + parameters, + region_name, + notification_arns=notification_arns, + tags=tags, + role_arn=role_arn, + cross_stack_resources=cross_stack_resources, + create_change_set=True, + ) + self.stack_name = stack_name + self.change_set_id = change_set_id + self.change_set_name = change_set_name + self.changes = self.diff(template=template, parameters=parameters) + + def diff(self, template, parameters=None): + self.template = template + self._parse_template() + changes = [] + resources_by_action = self.resource_map.diff(self.template_dict, parameters) + for action, resources in resources_by_action.items(): + for resource_name, resource in resources.items(): + changes.append(FakeChange( + action=action, + logical_resource_id=resource_name, + resource_type=resource['ResourceType'], + )) + return changes + + class FakeEvent(BaseModel): def __init__(self, stack_id, stack_name, logical_resource_id, physical_resource_id, resource_type, resource_status, resource_status_reason=None, resource_properties=None): @@ -136,10 +333,73 @@ class CloudFormationBackend(BaseBackend): def __init__(self): self.stacks = OrderedDict() + self.stacksets = OrderedDict() self.deleted_stacks = {} self.exports = OrderedDict() + self.change_sets = OrderedDict() - def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None): + def create_stack_set(self, name, template, parameters, tags=None, description=None, region='us-east-1', admin_role=None, execution_role=None): + stackset_id = generate_stackset_id(name) + new_stackset = FakeStackSet( + stackset_id=stackset_id, + name=name, + template=template, + parameters=parameters,
+ description=description, + tags=tags, + admin_role=admin_role, + execution_role=execution_role, + ) + self.stacksets[stackset_id] = new_stackset + return new_stackset + + def get_stack_set(self, name): + stacksets = self.stacksets.keys() + for stackset in stacksets: + if self.stacksets[stackset].name == name: + return self.stacksets[stackset] + raise ValidationError(name) + + def delete_stack_set(self, name): + stacksets = self.stacksets.keys() + for stackset in stacksets: + if self.stacksets[stackset].name == name: + self.stacksets[stackset].delete() + + def create_stack_instances(self, stackset_name, accounts, regions, parameters, operation_id=None): + stackset = self.get_stack_set(stackset_name) + + stackset.create_stack_instances( + accounts=accounts, + regions=regions, + parameters=parameters, + operation_id=operation_id, + ) + return stackset + + def update_stack_set(self, stackset_name, template=None, description=None, + parameters=None, tags=None, admin_role=None, execution_role=None, + accounts=None, regions=None, operation_id=None): + stackset = self.get_stack_set(stackset_name) + update = stackset.update( + template=template, + description=description, + parameters=parameters, + tags=tags, + admin_role=admin_role, + execution_role=execution_role, + accounts=accounts, + regions=regions, + operation_id=operation_id + ) + return update + + def delete_stack_instances(self, stackset_name, accounts, regions, operation_id=None): + stackset = self.get_stack_set(stackset_name) + stackset.delete_stack_instances(accounts, regions, operation_id) + return stackset + + def create_stack(self, name, template, parameters, region_name, notification_arns=None, tags=None, role_arn=None, create_change_set=False): stack_id = generate_stack_id(name) new_stack = FakeStack( stack_id=stack_id, @@ -151,6 +411,7 @@ class CloudFormationBackend(BaseBackend): tags=tags, role_arn=role_arn, cross_stack_resources=self.exports, + create_change_set=create_change_set, ) self.stacks[stack_id] = new_stack self._validate_export_uniqueness(new_stack) @@ -158,6 +419,82 @@ class CloudFormationBackend(BaseBackend): self.exports[export.name] = export return new_stack + def create_change_set(self, stack_name, change_set_name, template, parameters, region_name, change_set_type, notification_arns=None, tags=None, role_arn=None): + stack_id = None + stack_template = None + if change_set_type == 'UPDATE': + stacks = self.stacks.values() + stack = None + for s in stacks: + if s.name == stack_name: + stack = s + stack_id = stack.stack_id + stack_template = stack.template + if stack is None: + raise ValidationError(stack_name) + else: + stack_id = generate_stack_id(stack_name) + stack_template = template + + change_set_id = generate_changeset_id(change_set_name, region_name) + new_change_set = FakeChangeSet( + stack_id=stack_id, + stack_name=stack_name, + stack_template=stack_template, + change_set_id=change_set_id, + change_set_name=change_set_name, + template=template, + parameters=parameters, + region_name=region_name, + notification_arns=notification_arns, + tags=tags, + role_arn=role_arn, + cross_stack_resources=self.exports + ) + self.change_sets[change_set_id] = new_change_set + self.stacks[stack_id] = new_change_set + return change_set_id, stack_id + + def delete_change_set(self, change_set_name, stack_name=None): + if change_set_name in self.change_sets: + # This means arn was passed in + del self.change_sets[change_set_name] + else: + for cs in self.change_sets: + if self.change_sets[cs].change_set_name == 
change_set_name:
+                    del self.change_sets[cs]
+                    break
+
+    def describe_change_set(self, change_set_name, stack_name=None):
+        change_set = None
+        if change_set_name in self.change_sets:
+            # This means arn was passed in
+            change_set = self.change_sets[change_set_name]
+        else:
+            for cs in self.change_sets:
+                if self.change_sets[cs].change_set_name == change_set_name:
+                    change_set = self.change_sets[cs]
+        if change_set is None:
+            raise ValidationError(change_set_name)
+        return change_set
+
+    def execute_change_set(self, change_set_name, stack_name=None):
+        stack = None
+        if change_set_name in self.change_sets:
+            # This means arn was passed in
+            stack = self.change_sets[change_set_name]
+        else:
+            for cs in self.change_sets:
+                if self.change_sets[cs].change_set_name == change_set_name:
+                    stack = self.change_sets[cs]
+        if stack is None:
+            raise ValidationError(change_set_name)
+        if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS':
+            stack._add_stack_event('CREATE_COMPLETE')
+        else:
+            stack._add_stack_event('UPDATE_IN_PROGRESS')
+            stack._add_stack_event('UPDATE_COMPLETE')
+        return True
+
     def describe_stacks(self, name_or_stack_id):
         stacks = self.stacks.values()
         if name_or_stack_id:
@@ -173,8 +510,15 @@ class CloudFormationBackend(BaseBackend):
         else:
             return list(stacks)

+    def list_change_sets(self):
+        return self.change_sets.values()
+
     def list_stacks(self):
-        return self.stacks.values()
+        return [
+            v for v in self.stacks.values()
+        ] + [
+            v for v in self.deleted_stacks.values()
+        ]

     def get_stack(self, name_or_stack_id):
         all_stacks = dict(self.deleted_stacks, **self.stacks)
@@ -221,6 +565,9 @@ class CloudFormationBackend(BaseBackend):
         next_token = str(token + 100) if len(all_exports) > token + 100 else None
         return exports, next_token

+    def validate_template(self, template):
+        return validate_template_cfn_lint(template)
+
     def _validate_export_uniqueness(self, stack):
         new_stack_export_names = [x.name for x in stack.exports]
         export_names = self.exports.keys()
diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py
index 1c13c5058..f2e03bd81 100644
--- a/moto/cloudformation/parsing.py
+++ b/moto/cloudformation/parsing.py
@@ -10,8 +10,9 @@ from moto.autoscaling import models as autoscaling_models
 from moto.awslambda import models as lambda_models
 from moto.batch import models as batch_models
 from moto.cloudwatch import models as cloudwatch_models
+from moto.cognitoidentity import models as cognitoidentity_models
 from moto.datapipeline import models as datapipeline_models
-from moto.dynamodb import models as dynamodb_models
+from moto.dynamodb2 import models as dynamodb2_models
 from moto.ec2 import models as ec2_models
 from moto.ecs import models as ecs_models
 from moto.elb import models as elb_models
@@ -27,7 +28,7 @@ from moto.s3 import models as s3_models
 from moto.sns import models as sns_models
 from moto.sqs import models as sqs_models
 from .utils import random_suffix
-from .exceptions import MissingParameterError, UnformattedGetAttTemplateException, ValidationError
+from .exceptions import ExportNotFound, MissingParameterError, UnformattedGetAttTemplateException, ValidationError
 from boto.cloudformation.stack import Output

 MODEL_MAP = {
@@ -36,7 +37,7 @@ MODEL_MAP = {
     "AWS::Batch::JobDefinition": batch_models.JobDefinition,
     "AWS::Batch::JobQueue": batch_models.JobQueue,
     "AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment,
-    "AWS::DynamoDB::Table": dynamodb_models.Table,
+    "AWS::DynamoDB::Table": dynamodb2_models.Table,
     "AWS::Kinesis::Stream": kinesis_models.Stream,
"AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping, "AWS::Lambda::Function": lambda_models.LambdaFunction, @@ -65,6 +66,7 @@ MODEL_MAP = { "AWS::ElasticLoadBalancingV2::LoadBalancer": elbv2_models.FakeLoadBalancer, "AWS::ElasticLoadBalancingV2::TargetGroup": elbv2_models.FakeTargetGroup, "AWS::ElasticLoadBalancingV2::Listener": elbv2_models.FakeListener, + "AWS::Cognito::IdentityPool": cognitoidentity_models.CognitoIdentity, "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline, "AWS::IAM::InstanceProfile": iam_models.InstanceProfile, "AWS::IAM::Role": iam_models.Role, @@ -94,6 +96,7 @@ NAME_TYPE_MAP = { "AWS::ElasticBeanstalk::Application": "ApplicationName", "AWS::ElasticBeanstalk::Environment": "EnvironmentName", "AWS::ElasticLoadBalancing::LoadBalancer": "LoadBalancerName", + "AWS::ElasticLoadBalancingV2::TargetGroup": "Name", "AWS::RDS::DBInstance": "DBInstanceIdentifier", "AWS::S3::Bucket": "BucketName", "AWS::SNS::Topic": "TopicName", @@ -106,6 +109,8 @@ NULL_MODELS = [ "AWS::CloudFormation::WaitConditionHandle", ] +DEFAULT_REGION = 'us-east-1' + logger = logging.getLogger("moto") @@ -202,6 +207,16 @@ def clean_json(resource_json, resources_map): values = [x.value for x in resources_map.cross_stack_resources.values() if x.name == cleaned_val] if any(values): return values[0] + else: + raise ExportNotFound(cleaned_val) + + if 'Fn::GetAZs' in resource_json: + region = resource_json.get('Fn::GetAZs') or DEFAULT_REGION + result = [] + # TODO: make this configurable, to reflect the real AWS AZs + for az in ('a', 'b', 'c', 'd'): + result.append('%s%s' % (region, az)) + return result cleaned_json = {} for key, value in resource_json.items(): @@ -230,6 +245,23 @@ def resource_name_property_from_type(resource_type): return NAME_TYPE_MAP.get(resource_type) +def generate_resource_name(resource_type, stack_name, logical_id): + if resource_type in ["AWS::ElasticLoadBalancingV2::TargetGroup", + "AWS::ElasticLoadBalancingV2::LoadBalancer"]: + # Target group names need to be less than 32 characters, so when cloudformation creates a name for you + # it makes sure to stay under that limit + name_prefix = '{0}-{1}'.format(stack_name, logical_id) + my_random_suffix = random_suffix() + truncated_name_prefix = name_prefix[0:32 - (len(my_random_suffix) + 1)] + # if the truncated name ends in a dash, we'll end up with a double dash in the final name, which is + # not allowed + if truncated_name_prefix.endswith('-'): + truncated_name_prefix = truncated_name_prefix[:-1] + return '{0}-{1}'.format(truncated_name_prefix, my_random_suffix) + else: + return '{0}-{1}-{2}'.format(stack_name, logical_id, random_suffix()) + + def parse_resource(logical_id, resource_json, resources_map): resource_type = resource_json['Type'] resource_class = resource_class_from_type(resource_type) @@ -244,15 +276,12 @@ def parse_resource(logical_id, resource_json, resources_map): if 'Properties' not in resource_json: resource_json['Properties'] = dict() if resource_name_property not in resource_json['Properties']: - resource_json['Properties'][resource_name_property] = '{0}-{1}-{2}'.format( - resources_map.get('AWS::StackName'), - logical_id, - random_suffix()) + resource_json['Properties'][resource_name_property] = generate_resource_name( + resource_type, resources_map.get('AWS::StackName'), logical_id) resource_name = resource_json['Properties'][resource_name_property] else: - resource_name = '{0}-{1}-{2}'.format(resources_map.get('AWS::StackName'), - logical_id, - random_suffix()) + resource_name = 
generate_resource_name(resource_type, resources_map.get('AWS::StackName'), logical_id) + return resource_class, resource_json, resource_name @@ -357,7 +386,9 @@ class ResourceMap(collections.Mapping): "AWS::Region": self._region_name, "AWS::StackId": stack_id, "AWS::StackName": stack_name, + "AWS::URLSuffix": "amazonaws.com", "AWS::NoValue": None, + "AWS::Partition": "aws", } def __getitem__(self, key): @@ -395,11 +426,18 @@ class ResourceMap(collections.Mapping): self.resolved_parameters[parameter_name] = parameter.get('Default') # Set any input parameters that were passed + self.no_echo_parameter_keys = [] for key, value in self.input_parameters.items(): if key in self.resolved_parameters: - value_type = parameter_slots[key].get('Type', 'String') + parameter_slot = parameter_slots[key] + + value_type = parameter_slot.get('Type', 'String') if value_type == 'CommaDelimitedList' or value_type.startswith("List"): value = value.split(',') + + if parameter_slot.get('NoEcho'): + self.no_echo_parameter_keys.append(key) + self.resolved_parameters[key] = value # Check if there are any non-default params that were not passed input @@ -435,36 +473,70 @@ class ResourceMap(collections.Mapping): ec2_models.ec2_backends[self._region_name].create_tags( [self[resource].physical_resource_id], self.tags) - def update(self, template, parameters=None): + def diff(self, template, parameters=None): if parameters: self.input_parameters = parameters self.load_mapping() self.load_parameters() self.load_conditions() + old_template = self._resource_json_map + new_template = template['Resources'] + + resource_names_by_action = { + 'Add': set(new_template) - set(old_template), + 'Modify': set(name for name in new_template if name in old_template and new_template[ + name] != old_template[name]), + 'Remove': set(old_template) - set(new_template) + } + resources_by_action = { + 'Add': {}, + 'Modify': {}, + 'Remove': {}, + } + + for resource_name in resource_names_by_action['Add']: + resources_by_action['Add'][resource_name] = { + 'LogicalResourceId': resource_name, + 'ResourceType': new_template[resource_name]['Type'] + } + + for resource_name in resource_names_by_action['Modify']: + resources_by_action['Modify'][resource_name] = { + 'LogicalResourceId': resource_name, + 'ResourceType': new_template[resource_name]['Type'] + } + + for resource_name in resource_names_by_action['Remove']: + resources_by_action['Remove'][resource_name] = { + 'LogicalResourceId': resource_name, + 'ResourceType': old_template[resource_name]['Type'] + } + + return resources_by_action + + def update(self, template, parameters=None): + resources_by_action = self.diff(template, parameters) + old_template = self._resource_json_map new_template = template['Resources'] self._resource_json_map = new_template - new_resource_names = set(new_template) - set(old_template) - for resource_name in new_resource_names: + for resource_name, resource in resources_by_action['Add'].items(): resource_json = new_template[resource_name] new_resource = parse_and_create_resource( resource_name, resource_json, self, self._region_name) self._parsed_resources[resource_name] = new_resource - removed_resource_nams = set(old_template) - set(new_template) - for resource_name in removed_resource_nams: + for resource_name, resource in resources_by_action['Remove'].items(): resource_json = old_template[resource_name] parse_and_delete_resource( resource_name, resource_json, self, self._region_name) self._parsed_resources.pop(resource_name) - resources_to_update = set(name for 
name in new_template if name in old_template and new_template[ - name] != old_template[name]) tries = 1 - while resources_to_update and tries < 5: - for resource_name in resources_to_update.copy(): + while resources_by_action['Modify'] and tries < 5: + for resource_name, resource in resources_by_action['Modify'].copy().items(): resource_json = new_template[resource_name] try: changed_resource = parse_and_update_resource( @@ -475,7 +547,7 @@ class ResourceMap(collections.Mapping): last_exception = e else: self._parsed_resources[resource_name] = changed_resource - resources_to_update.remove(resource_name) + del resources_by_action['Modify'][resource_name] tries += 1 if tries == 5: raise last_exception diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index a5b251b89..80970262f 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -1,9 +1,11 @@ from __future__ import unicode_literals import json +import yaml from six.moves.urllib.parse import urlparse from moto.core.responses import BaseResponse +from moto.core.utils import amzn_request_id from moto.s3 import s3_backend from .models import cloudformation_backends from .exceptions import ValidationError @@ -77,6 +79,90 @@ class CloudFormationResponse(BaseResponse): template = self.response_template(CREATE_STACK_RESPONSE_TEMPLATE) return template.render(stack=stack) + @amzn_request_id + def create_change_set(self): + stack_name = self._get_param('StackName') + change_set_name = self._get_param('ChangeSetName') + stack_body = self._get_param('TemplateBody') + template_url = self._get_param('TemplateURL') + role_arn = self._get_param('RoleARN') + update_or_create = self._get_param('ChangeSetType', 'CREATE') + parameters_list = self._get_list_prefix("Parameters.member") + tags = dict((item['key'], item['value']) + for item in self._get_list_prefix("Tags.member")) + parameters = {param['parameter_key']: param['parameter_value'] + for param in parameters_list} + if template_url: + stack_body = self._get_stack_from_s3_url(template_url) + stack_notification_arns = self._get_multi_param( + 'NotificationARNs.member') + change_set_id, stack_id = self.cloudformation_backend.create_change_set( + stack_name=stack_name, + change_set_name=change_set_name, + template=stack_body, + parameters=parameters, + region_name=self.region, + notification_arns=stack_notification_arns, + tags=tags, + role_arn=role_arn, + change_set_type=update_or_create, + ) + if self.request_json: + return json.dumps({ + 'CreateChangeSetResponse': { + 'CreateChangeSetResult': { + 'Id': change_set_id, + 'StackId': stack_id, + } + } + }) + else: + template = self.response_template(CREATE_CHANGE_SET_RESPONSE_TEMPLATE) + return template.render(stack_id=stack_id, change_set_id=change_set_id) + + def delete_change_set(self): + stack_name = self._get_param('StackName') + change_set_name = self._get_param('ChangeSetName') + + self.cloudformation_backend.delete_change_set(change_set_name=change_set_name, stack_name=stack_name) + if self.request_json: + return json.dumps({ + 'DeleteChangeSetResponse': { + 'DeleteChangeSetResult': {}, + } + }) + else: + template = self.response_template(DELETE_CHANGE_SET_RESPONSE_TEMPLATE) + return template.render() + + def describe_change_set(self): + stack_name = self._get_param('StackName') + change_set_name = self._get_param('ChangeSetName') + change_set = self.cloudformation_backend.describe_change_set( + change_set_name=change_set_name, + stack_name=stack_name, + ) + template = 
self.response_template(DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE) + return template.render(change_set=change_set) + + @amzn_request_id + def execute_change_set(self): + stack_name = self._get_param('StackName') + change_set_name = self._get_param('ChangeSetName') + self.cloudformation_backend.execute_change_set( + stack_name=stack_name, + change_set_name=change_set_name, + ) + if self.request_json: + return json.dumps({ + 'ExecuteChangeSetResponse': { + 'ExecuteChangeSetResult': {}, + } + }) + else: + template = self.response_template(EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE) + return template.render() + def describe_stacks(self): stack_name_or_id = None if self._get_param('StackName'): @@ -126,6 +212,11 @@ class CloudFormationResponse(BaseResponse): template = self.response_template(DESCRIBE_STACK_EVENTS_RESPONSE) return template.render(stack=stack) + def list_change_sets(self): + change_sets = self.cloudformation_backend.list_change_sets() + template = self.response_template(LIST_CHANGE_SETS_RESPONSE) + return template.render(change_sets=change_sets) + def list_stacks(self): stacks = self.cloudformation_backend.list_stacks() template = self.response_template(LIST_STACKS_RESPONSE) @@ -161,16 +252,26 @@ class CloudFormationResponse(BaseResponse): def update_stack(self): stack_name = self._get_param('StackName') role_arn = self._get_param('RoleARN') + template_url = self._get_param('TemplateURL') + stack_body = self._get_param('TemplateBody') + stack = self.cloudformation_backend.get_stack(stack_name) if self._get_param('UsePreviousTemplate') == "true": - stack_body = self.cloudformation_backend.get_stack( - stack_name).template - else: - stack_body = self._get_param('TemplateBody') + stack_body = stack.template + elif not stack_body and template_url: + stack_body = self._get_stack_from_s3_url(template_url) + + incoming_params = self._get_list_prefix("Parameters.member") parameters = dict([ (parameter['parameter_key'], parameter['parameter_value']) for parameter - in self._get_list_prefix("Parameters.member") + in incoming_params if 'parameter_value' in parameter ]) + previous = dict([ + (parameter['parameter_key'], stack.parameters[parameter['parameter_key']]) + for parameter + in incoming_params if 'use_previous_value' in parameter + ]) + parameters.update(previous) # boto3 is supposed to let you clear the tags by passing an empty value, but the request body doesn't # end up containing anything we can use to differentiate between passing an empty value versus not # passing anything. so until that changes, moto won't be able to clear tags, only update them. 
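Taken together, the handlers above implement the usual change-set lifecycle: create_change_set records a diff (and, for ChangeSetType=CREATE, reserves a new stack id), describe_change_set reports the pending changes, and execute_change_set applies them by emitting the corresponding stack events. A minimal sketch of how a client could drive that flow through moto; the stack, queue, and change-set names are illustrative only:

    import json
    import boto3
    from moto import mock_cloudformation

    TEMPLATE = json.dumps({
        "Resources": {
            "Queue": {"Type": "AWS::SQS::Queue", "Properties": {"QueueName": "demo-queue"}},
        }
    })

    @mock_cloudformation
    def change_set_lifecycle():
        cf = boto3.client("cloudformation", region_name="us-east-1")
        # CREATE reserves a new stack id and records the Add/Modify/Remove diff
        created = cf.create_change_set(
            StackName="demo-stack",
            ChangeSetName="demo-change-set",
            TemplateBody=TEMPLATE,
            ChangeSetType="CREATE",
        )
        # Reports the recorded changes (all "Add" for a fresh stack)
        cf.describe_change_set(ChangeSetName="demo-change-set")
        # Replays the change set, leaving CREATE_COMPLETE stack events behind
        cf.execute_change_set(ChangeSetName="demo-change-set")
        return created["Id"], created["StackId"]
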
@@ -225,6 +326,201 @@ class CloudFormationResponse(BaseResponse):
         template = self.response_template(LIST_EXPORTS_RESPONSE)
         return template.render(exports=exports, next_token=next_token)

+    def validate_template(self):
+        cfn_lint = self.cloudformation_backend.validate_template(self._get_param('TemplateBody'))
+        if cfn_lint:
+            raise ValidationError(cfn_lint[0].message)
+        description = ""
+        try:
+            description = json.loads(self._get_param('TemplateBody'))['Description']
+        except (ValueError, KeyError):
+            pass
+        try:
+            description = yaml.load(self._get_param('TemplateBody'))['Description']
+        except (yaml.parser.ParserError, KeyError):
+            pass
+        template = self.response_template(VALIDATE_STACK_RESPONSE_TEMPLATE)
+        return template.render(description=description)
+
+    def create_stack_set(self):
+        stackset_name = self._get_param('StackSetName')
+        stack_body = self._get_param('TemplateBody')
+        template_url = self._get_param('TemplateURL')
+        # role_arn = self._get_param('RoleARN')
+        parameters_list = self._get_list_prefix("Parameters.member")
+        tags = dict((item['key'], item['value'])
+                    for item in self._get_list_prefix("Tags.member"))
+
+        # Copy-Pasta - Hack dict-comprehension
+        parameters = dict([
+            (parameter['parameter_key'], parameter['parameter_value'])
+            for parameter
+            in parameters_list
+        ])
+        if template_url:
+            stack_body = self._get_stack_from_s3_url(template_url)
+
+        stackset = self.cloudformation_backend.create_stack_set(
+            name=stackset_name,
+            template=stack_body,
+            parameters=parameters,
+            tags=tags,
+            # role_arn=role_arn,
+        )
+        if self.request_json:
+            return json.dumps({
+                'CreateStackSetResponse': {
+                    'CreateStackSetResult': {
+                        'StackSetId': stackset.stackset_id,
+                    }
+                }
+            })
+        else:
+            template = self.response_template(CREATE_STACK_SET_RESPONSE_TEMPLATE)
+            return template.render(stackset=stackset)
+
+    def create_stack_instances(self):
+        stackset_name = self._get_param('StackSetName')
+        accounts = self._get_multi_param('Accounts.member')
+        regions = self._get_multi_param('Regions.member')
+        parameters = self._get_multi_param('ParameterOverrides.member')
+        self.cloudformation_backend.create_stack_instances(stackset_name, accounts, regions, parameters)
+        template = self.response_template(CREATE_STACK_INSTANCES_TEMPLATE)
+        return template.render()
+
+    def delete_stack_set(self):
+        stackset_name = self._get_param('StackSetName')
+        self.cloudformation_backend.delete_stack_set(stackset_name)
+        template = self.response_template(DELETE_STACK_SET_RESPONSE_TEMPLATE)
+        return template.render()
+
+    def delete_stack_instances(self):
+        stackset_name = self._get_param('StackSetName')
+        accounts = self._get_multi_param('Accounts.member')
+        regions = self._get_multi_param('Regions.member')
+        operation = self.cloudformation_backend.delete_stack_instances(stackset_name, accounts, regions)
+
+        template = self.response_template(DELETE_STACK_INSTANCES_TEMPLATE)
+        return template.render(operation=operation)
+
+    def describe_stack_set(self):
+        stackset_name = self._get_param('StackSetName')
+        stackset = self.cloudformation_backend.get_stack_set(stackset_name)
+
+        if not stackset.admin_role:
+            stackset.admin_role = 'arn:aws:iam::123456789012:role/AWSCloudFormationStackSetAdministrationRole'
+        if not stackset.execution_role:
+            stackset.execution_role = 'AWSCloudFormationStackSetExecutionRole'
+
+        template = self.response_template(DESCRIBE_STACK_SET_RESPONSE_TEMPLATE)
+        return template.render(stackset=stackset)
+
+    def describe_stack_instance(self):
+        stackset_name = self._get_param('StackSetName')
+        account =
self._get_param('StackInstanceAccount') + region = self._get_param('StackInstanceRegion') + + instance = self.cloudformation_backend.get_stack_set(stackset_name).instances.get_instance(account, region) + template = self.response_template(DESCRIBE_STACK_INSTANCE_TEMPLATE) + rendered = template.render(instance=instance) + return rendered + + def list_stack_sets(self): + stacksets = self.cloudformation_backend.stacksets + template = self.response_template(LIST_STACK_SETS_TEMPLATE) + return template.render(stacksets=stacksets) + + def list_stack_instances(self): + stackset_name = self._get_param('StackSetName') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + template = self.response_template(LIST_STACK_INSTANCES_TEMPLATE) + return template.render(stackset=stackset) + + def list_stack_set_operations(self): + stackset_name = self._get_param('StackSetName') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + template = self.response_template(LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE) + return template.render(stackset=stackset) + + def stop_stack_set_operation(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + stackset.update_operation(operation_id, 'STOPPED') + template = self.response_template(STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE) + return template.render() + + def describe_stack_set_operation(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + operation = stackset.get_operation(operation_id) + template = self.response_template(DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE) + return template.render(stackset=stackset, operation=operation) + + def list_stack_set_operation_results(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + stackset = self.cloudformation_backend.get_stack_set(stackset_name) + operation = stackset.get_operation(operation_id) + template = self.response_template(LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE) + return template.render(operation=operation) + + def update_stack_set(self): + stackset_name = self._get_param('StackSetName') + operation_id = self._get_param('OperationId') + description = self._get_param('Description') + execution_role = self._get_param('ExecutionRoleName') + admin_role = self._get_param('AdministrationRoleARN') + accounts = self._get_multi_param('Accounts.member') + regions = self._get_multi_param('Regions.member') + template_body = self._get_param('TemplateBody') + template_url = self._get_param('TemplateURL') + if template_url: + template_body = self._get_stack_from_s3_url(template_url) + tags = dict((item['key'], item['value']) + for item in self._get_list_prefix("Tags.member")) + parameters_list = self._get_list_prefix("Parameters.member") + parameters = dict([ + (parameter['parameter_key'], parameter['parameter_value']) + for parameter + in parameters_list + ]) + operation = self.cloudformation_backend.update_stack_set( + stackset_name=stackset_name, + template=template_body, + description=description, + parameters=parameters, + tags=tags, + admin_role=admin_role, + execution_role=execution_role, + accounts=accounts, + regions=regions, + operation_id=operation_id + ) + + template = self.response_template(UPDATE_STACK_SET_RESPONSE_TEMPLATE) + return template.render(operation=operation) + + 
def update_stack_instances(self): + stackset_name = self._get_param('StackSetName') + accounts = self._get_multi_param('Accounts.member') + regions = self._get_multi_param('Regions.member') + parameters = self._get_multi_param('ParameterOverrides.member') + operation = self.cloudformation_backend.get_stack_set(stackset_name).update_instances(accounts, regions, parameters) + template = self.response_template(UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE) + return template.render(operation=operation) + + +VALIDATE_STACK_RESPONSE_TEMPLATE = """ + + + + +{{ description }} + + +""" CREATE_STACK_RESPONSE_TEMPLATE = """ @@ -246,6 +542,87 @@ UPDATE_STACK_RESPONSE_TEMPLATE = """ + + {{change_set_id}} + {{ stack_id }} + + + {{ request_id }} + + +""" + +DELETE_CHANGE_SET_RESPONSE_TEMPLATE = """ + + + + 3d3200a1-810e-3023-6cc3-example + + +""" + +DESCRIBE_CHANGE_SET_RESPONSE_TEMPLATE = """ + + {{ change_set.change_set_id }} + {{ change_set.change_set_name }} + {{ change_set.stack_id }} + {{ change_set.stack_name }} + {{ change_set.description }} + + {% for param_name, param_value in change_set.stack_parameters.items() %} + + {{ param_name }} + {{ param_value }} + + {% endfor %} + + 2011-05-23T15:47:44Z + {{ change_set.execution_status }} + {{ change_set.status }} + {{ change_set.status_reason }} + {% if change_set.notification_arns %} + + {% for notification_arn in change_set.notification_arns %} + {{ notification_arn }} + {% endfor %} + + {% else %} + + {% endif %} + {% if change_set.role_arn %} + {{ change_set.role_arn }} + {% endif %} + {% if change_set.changes %} + + {% for change in change_set.changes %} + + Resource + + {{ change.action }} + {{ change.logical_resource_id }} + {{ change.resource_type }} + + + {% endfor %} + + {% endif %} + {% if next_token %} + {{ next_token }} + {% endif %} + +""" + +EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE = """ + + + + + {{ request_id }} + + +""" + DESCRIBE_STACKS_TEMPLATE = """ @@ -277,7 +654,11 @@ DESCRIBE_STACKS_TEMPLATE = """ {% for param_name, param_value in stack.stack_parameters.items() %} {{ param_name }} - {{ param_value }} + {% if param_name in stack.resource_map.no_echo_parameter_keys %} + **** + {% else %} + {{ param_value }} + {% endif %} {% endfor %} @@ -361,6 +742,27 @@ DESCRIBE_STACK_EVENTS_RESPONSE = """ + + + {% for change_set in change_sets %} + + {{ change_set.stack_id }} + {{ change_set.stack_name }} + {{ change_set.change_set_id }} + {{ change_set.change_set_name }} + {{ change_set.execution_status }} + {{ change_set.status }} + {{ change_set.status_reason }} + 2011-05-23T15:47:44Z + {{ change_set.description }} + + {% endfor %} + + +""" + + LIST_STACKS_RESPONSE = """ @@ -435,3 +837,236 @@ LIST_EXPORTS_RESPONSE = """ + + {{ stackset.stackset_id }} + + + f457258c-391d-41d1-861f-example + + +""" + +DESCRIBE_STACK_SET_RESPONSE_TEMPLATE = """ + + + + {{ stackset.arn }} + {{ stackset.execution_role }} + {{ stackset.admin_role }} + {{ stackset.id }} + {{ stackset.template }} + {{ stackset.name }} + + {% for param_name, param_value in stackset.parameters.items() %} + + {{ param_name }} + {{ param_value }} + + {% endfor %} + + + {% for tag_key, tag_value in stackset.tags.items() %} + + {{ tag_key }} + {{ tag_value }} + + {% endfor %} + + {{ stackset.status }} + + + + d8b64e11-5332-46e1-9603-example + +""" + +DELETE_STACK_SET_RESPONSE_TEMPLATE = """ + + + c35ec2d0-d69f-4c4d-9bd7-example + +""" + +CREATE_STACK_INSTANCES_TEMPLATE = """ + + 1459ad6d-63cc-4c96-a73e-example + + + 6b29f7e3-69be-4d32-b374-example + + +""" + +LIST_STACK_INSTANCES_TEMPLATE = """ + 
+ + {% for instance in stackset.stack_instances %} + + {{ instance.StackId }} + {{ instance.StackSetId }} + {{ instance.Region }} + {{ instance.Account }} + {{ instance.Status }} + + {% endfor %} + + + + 83c27e73-b498-410f-993c-example + + +""" + +DELETE_STACK_INSTANCES_TEMPLATE = """ + + {{ operation.OperationId }} + + + e5325090-66f6-4ecd-a531-example + + +""" + +DESCRIBE_STACK_INSTANCE_TEMPLATE = """ + + + {{ instance.StackId }} + {{ instance.StackSetId }} + {% if instance.ParameterOverrides %} + + {% for override in instance.ParameterOverrides %} + {% if override['ParameterKey'] or override['ParameterValue'] %} + + {{ override.ParameterKey }} + false + {{ override.ParameterValue }} + + {% endif %} + {% endfor %} + + {% else %} + + {% endif %} + {{ instance.Region }} + {{ instance.Account }} + {{ instance.Status }} + + + + c6c7be10-0343-4319-8a25-example + + +""" + +LIST_STACK_SETS_TEMPLATE = """ + + + {% for key, value in stacksets.items() %} + + {{ value.name }} + {{ value.id }} + {{ value.status }} + + {% endfor %} + + + + 4dcacb73-841e-4ed8-b335-example + + +""" + +UPDATE_STACK_INSTANCES_RESPONSE_TEMPLATE = """ + + {{ operation }} + + + bdbf8e94-19b6-4ce4-af85-example + + +""" + +UPDATE_STACK_SET_RESPONSE_TEMPLATE = """ + + {{ operation.OperationId }} + + + adac907b-17e3-43e6-a254-example + + +""" + +LIST_STACK_SET_OPERATIONS_RESPONSE_TEMPLATE = """ + + + {% for operation in stackset.operations %} + + {{ operation.CreationTimestamp }} + {{ operation.OperationId }} + {{ operation.Action }} + {{ operation.EndTimestamp }} + {{ operation.Status }} + + {% endfor %} + + + + 65b9d9be-08bb-4a43-9a21-example + + +""" + +STOP_STACK_SET_OPERATION_RESPONSE_TEMPLATE = """ + + + 2188554a-07c6-4396-b2c5-example + +""" + +DESCRIBE_STACKSET_OPERATION_RESPONSE_TEMPLATE = """ + + + {{ stackset.execution_role }} + arn:aws:iam::123456789012:role/{{ stackset.admin_role }} + {{ stackset.id }} + {{ operation.CreationTimestamp }} + {{ operation.OperationId }} + {{ operation.Action }} + + + + {{ operation.EndTimestamp }} + {{ operation.Status }} + + + + 2edc27b6-9ce2-486a-a192-example + + +""" + +LIST_STACK_SET_OPERATION_RESULTS_RESPONSE_TEMPLATE = """ + + + {% for instance in operation.Instances %} + {% for account, region in instance.items() %} + + + Function not found: arn:aws:lambda:us-west-2:123456789012:function:AWSCloudFormationStackSetAccountGate + SKIPPED + + {{ region }} + {{ account }} + {{ operation.Status }} + + {% endfor %} + {% endfor %} + + + + ac05a9ce-5f98-4197-a29b-example + + +""" diff --git a/moto/cloudformation/utils.py b/moto/cloudformation/utils.py index 384ea5401..e4290ce1a 100644 --- a/moto/cloudformation/utils.py +++ b/moto/cloudformation/utils.py @@ -3,16 +3,34 @@ import uuid import six import random import yaml +import os +import string + +from cfnlint import decode, core -def generate_stack_id(stack_name): +def generate_stack_id(stack_name, region="us-east-1", account="123456789"): random_id = uuid.uuid4() - return "arn:aws:cloudformation:us-east-1:123456789:stack/{0}/{1}".format(stack_name, random_id) + return "arn:aws:cloudformation:{}:{}:stack/{}/{}".format(region, account, stack_name, random_id) + + +def generate_changeset_id(changeset_name, region_name): + random_id = uuid.uuid4() + return 'arn:aws:cloudformation:{0}:123456789:changeSet/{1}/{2}'.format(region_name, changeset_name, random_id) + + +def generate_stackset_id(stackset_name): + random_id = uuid.uuid4() + return '{}:{}'.format(stackset_name, random_id) + + +def generate_stackset_arn(stackset_id, region_name): + 
return 'arn:aws:cloudformation:{}:123456789012:stackset/{}'.format(region_name, stackset_id)


 def random_suffix():
     size = 12
-    chars = list(range(10)) + ['A-Z']
+    chars = list(range(10)) + list(string.ascii_uppercase)
     return ''.join(six.text_type(random.choice(chars)) for x in range(size))
@@ -33,3 +51,33 @@ def yaml_tag_constructor(loader, tag, node):
         key = 'Fn::{}'.format(tag[1:])

     return {key: _f(loader, tag, node)}
+
+
+def validate_template_cfn_lint(template):
+
+    # Save the template to a temporary file -- cfn-lint requires a file
+    filename = "file.tmp"
+    with open(filename, "w") as file:
+        file.write(template)
+    abs_filename = os.path.abspath(filename)
+
+    # decode handles both yaml and json
+    template, matches = decode.decode(abs_filename, False)
+
+    # Set cfn-lint to info
+    core.configure_logging(None)
+
+    # Initialize the ruleset to be applied (no overrules, no excludes)
+    rules = core.get_rules([], [], [])
+
+    # Use us-east-1 region (spec file) for validation
+    regions = ['us-east-1']
+
+    # Process all the rules and gather the errors
+    matches = core.run_checks(
+        abs_filename,
+        template,
+        rules,
+        regions)
+
+    return matches
diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py
index f9d571a23..ed644f874 100644
--- a/moto/cloudwatch/models.py
+++ b/moto/cloudwatch/models.py
@@ -1,15 +1,17 @@
-import json
+import json

+from moto.core.utils import iso_8601_datetime_with_milliseconds
 from moto.core import BaseBackend, BaseModel
 from moto.core.exceptions import RESTError
 import boto.ec2.cloudwatch
-import datetime
-
+from datetime import datetime, timedelta
+from dateutil.tz import tzutc
 from .utils import make_arn_for_dashboard
-
 DEFAULT_ACCOUNT_ID = 123456789012

+_EMPTY_LIST = tuple()
+

 class Dimension(object):
@@ -18,6 +20,34 @@ class Dimension(object):
         self.value = value


+def daterange(start, stop, step=timedelta(days=1), inclusive=False):
+    """
+    This method will iterate from `start` to `stop` datetimes with a timedelta step of `step`
+    (supports iteration forwards or backwards in time)
+
+    :param start: start datetime
+    :param stop: end datetime
+    :param step: step size as a timedelta
+    :param inclusive: if True, the last item returned will be the one closest to `stop` (or `stop` itself if the step divides the range evenly).
+    """
+
+    # inclusive=False to behave like range by default
+    total_step_secs = step.total_seconds()
+    assert total_step_secs != 0
+
+    if total_step_secs > 0:
+        while start < stop:
+            yield start
+            start = start + step
+    else:
+        while stop < start:
+            yield start
+            start = start + step
+
+    if inclusive and start == stop:
+        yield start
+
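Since daterange drives the statistics bucketing further down, its edge behaviour is worth pinning down with a small worked example (the dates here are chosen arbitrarily):

    from datetime import datetime, timedelta

    start = datetime(2018, 1, 1, 0, 0)
    stop = datetime(2018, 1, 1, 0, 10)

    # Excludes `stop`, like range(): bucket starts at 00:00 and 00:05 only
    list(daterange(start, stop, timedelta(minutes=5)))
    # -> [datetime(2018, 1, 1, 0, 0), datetime(2018, 1, 1, 0, 5)]

    # inclusive=True also yields `stop` when the step divides the range evenly
    list(daterange(start, stop, timedelta(minutes=5), inclusive=True))
    # -> [datetime(2018, 1, 1, 0, 0), datetime(2018, 1, 1, 0, 5), datetime(2018, 1, 1, 0, 10)]
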
+ """ + + # inclusive=False to behave like range by default + total_step_secs = step.total_seconds() + assert total_step_secs != 0 + + if total_step_secs > 0: + while start < stop: + yield start + start = start + step + else: + while stop < start: + yield start + start = start + step + + if inclusive and start == stop: + yield start + + class FakeAlarm(BaseModel): def __init__(self, name, namespace, metric_name, comparison_operator, evaluation_periods, @@ -38,35 +68,36 @@ class FakeAlarm(BaseModel): self.ok_actions = ok_actions self.insufficient_data_actions = insufficient_data_actions self.unit = unit - self.configuration_updated_timestamp = datetime.datetime.utcnow() + self.configuration_updated_timestamp = datetime.utcnow() self.history = [] self.state_reason = '' self.state_reason_data = '{}' - self.state = 'OK' - self.state_updated_timestamp = datetime.datetime.utcnow() + self.state_value = 'OK' + self.state_updated_timestamp = datetime.utcnow() def update_state(self, reason, reason_data, state_value): # History type, that then decides what the rest of the items are, can be one of ConfigurationUpdate | StateUpdate | Action self.history.append( - ('StateUpdate', self.state_reason, self.state_reason_data, self.state, self.state_updated_timestamp) + ('StateUpdate', self.state_reason, self.state_reason_data, self.state_value, self.state_updated_timestamp) ) self.state_reason = reason self.state_reason_data = reason_data - self.state = state_value - self.state_updated_timestamp = datetime.datetime.utcnow() + self.state_value = state_value + self.state_updated_timestamp = datetime.utcnow() class MetricDatum(BaseModel): - def __init__(self, namespace, name, value, dimensions): + def __init__(self, namespace, name, value, dimensions, timestamp): self.namespace = namespace self.name = name self.value = value - self.dimensions = [Dimension(dimension['name'], dimension[ - 'value']) for dimension in dimensions] + self.timestamp = timestamp or datetime.utcnow().replace(tzinfo=tzutc()) + self.dimensions = [Dimension(dimension['Name'], dimension[ + 'Value']) for dimension in dimensions] class Dashboard(BaseModel): @@ -75,7 +106,7 @@ class Dashboard(BaseModel): self.arn = make_arn_for_dashboard(DEFAULT_ACCOUNT_ID, name) self.name = name self.body = body - self.last_modified = datetime.datetime.now() + self.last_modified = datetime.now() @property def last_modified_iso(self): @@ -92,6 +123,53 @@ class Dashboard(BaseModel): return ''.format(self.name) +class Statistics: + def __init__(self, stats, dt): + self.timestamp = iso_8601_datetime_with_milliseconds(dt) + self.values = [] + self.stats = stats + + @property + def sample_count(self): + if 'SampleCount' not in self.stats: + return None + + return len(self.values) + + @property + def unit(self): + return None + + @property + def sum(self): + if 'Sum' not in self.stats: + return None + + return sum(self.values) + + @property + def minimum(self): + if 'Minimum' not in self.stats: + return None + + return min(self.values) + + @property + def maximum(self): + if 'Maximum' not in self.stats: + return None + + return max(self.values) + + @property + def average(self): + if 'Average' not in self.stats: + return None + + # when moto is 3.4+ we can switch to the statistics module + return sum(self.values) / len(self.values) + + class CloudWatchBackend(BaseBackend): def __init__(self): @@ -143,16 +221,46 @@ class CloudWatchBackend(BaseBackend): ] def get_alarms_by_state_value(self, target_state): - return filter(lambda alarm: alarm.state == target_state, 
+        return filter(lambda alarm: alarm.state_value == target_state, self.alarms.values())

     def delete_alarms(self, alarm_names):
         for alarm_name in alarm_names:
             self.alarms.pop(alarm_name, None)

     def put_metric_data(self, namespace, metric_data):
-        for name, value, dimensions in metric_data:
+        for metric_member in metric_data:
+            # Preserve "datetime" values so get_metric_statistics can compare them
+            timestamp = metric_member.get('Timestamp')
+            if timestamp is not None and not isinstance(timestamp, datetime):
+                timestamp = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
+                timestamp = timestamp.replace(tzinfo=tzutc())
             self.metric_data.append(MetricDatum(
-                namespace, name, value, dimensions))
+                namespace, metric_member['MetricName'], float(metric_member.get('Value', 0)), metric_member.get('Dimensions.member', _EMPTY_LIST), timestamp))
+
+    def get_metric_statistics(self, namespace, metric_name, start_time, end_time, period, stats):
+        period_delta = timedelta(seconds=period)
+        filtered_data = [md for md in self.metric_data if
+                         md.namespace == namespace and md.name == metric_name and start_time <= md.timestamp <= end_time]
+
+        # earliest to latest
+        filtered_data = sorted(filtered_data, key=lambda x: x.timestamp)
+        if not filtered_data:
+            return []
+
+        idx = 0
+        data = list()
+        for dt in daterange(filtered_data[0].timestamp, filtered_data[-1].timestamp + period_delta, period_delta):
+            s = Statistics(stats, dt)
+            while idx < len(filtered_data) and filtered_data[idx].timestamp < (dt + period_delta):
+                s.values.append(filtered_data[idx].value)
+                idx += 1
+
+            if not s.values:
+                continue
+
+            data.append(s)
+
+        return data

     def get_all_metrics(self):
         return self.metric_data
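To make the bucketing above concrete: datapoints are sorted by timestamp, daterange yields period-aligned bucket starts beginning at the earliest datapoint, and each Statistics object aggregates the values falling inside its bucket, with empty buckets skipped. A sketch of feeding the backend directly; the namespace, metric name, and values are invented for illustration:

    from datetime import datetime, timedelta
    from dateutil.tz import tzutc

    backend = CloudWatchBackend()
    t = datetime(2018, 1, 1, tzinfo=tzutc())
    backend.put_metric_data("tester", [
        {"MetricName": "metric", "Value": 1.0, "Timestamp": t},
        {"MetricName": "metric", "Value": 2.0, "Timestamp": t + timedelta(seconds=30)},
        {"MetricName": "metric", "Value": 4.0, "Timestamp": t + timedelta(seconds=90)},
    ])
    stats = backend.get_metric_statistics(
        "tester", "metric", t, t + timedelta(hours=1), 60, ["Sum", "Average"])
    # Two buckets: [t, t+60s) -> Sum 3.0, Average 1.5; [t+60s, t+120s) -> Sum 4.0, Average 4.0
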
diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py
index 7a5fa5ebd..bf176e1be 100644
--- a/moto/cloudwatch/responses.py
+++ b/moto/cloudwatch/responses.py
@@ -2,6 +2,7 @@ import json
 from moto.core.utils import amzn_request_id
 from moto.core.responses import BaseResponse
 from .models import cloudwatch_backends
+from dateutil.parser import parse as dtparse


 class CloudWatchResponse(BaseResponse):
@@ -75,35 +76,36 @@ class CloudWatchResponse(BaseResponse):
     @amzn_request_id
     def put_metric_data(self):
         namespace = self._get_param('Namespace')
-        metric_data = []
-        metric_index = 1
-        while True:
-            try:
-                metric_name = self.querystring[
-                    'MetricData.member.{0}.MetricName'.format(metric_index)][0]
-            except KeyError:
-                break
-            value = self.querystring.get(
-                'MetricData.member.{0}.Value'.format(metric_index), [None])[0]
-            dimensions = []
-            dimension_index = 1
-            while True:
-                try:
-                    dimension_name = self.querystring[
-                        'MetricData.member.{0}.Dimensions.member.{1}.Name'.format(metric_index, dimension_index)][0]
-                except KeyError:
-                    break
-                dimension_value = self.querystring[
-                    'MetricData.member.{0}.Dimensions.member.{1}.Value'.format(metric_index, dimension_index)][0]
-                dimensions.append(
-                    {'name': dimension_name, 'value': dimension_value})
-                dimension_index += 1
-            metric_data.append([metric_name, value, dimensions])
-            metric_index += 1
+        metric_data = self._get_multi_param('MetricData.member')
+
         self.cloudwatch_backend.put_metric_data(namespace, metric_data)
         template = self.response_template(PUT_METRIC_DATA_TEMPLATE)
         return template.render()

+    @amzn_request_id
+    def get_metric_statistics(self):
+        namespace = self._get_param('Namespace')
+        metric_name = self._get_param('MetricName')
+        start_time = dtparse(self._get_param('StartTime'))
+        end_time = dtparse(self._get_param('EndTime'))
+        period = int(self._get_param('Period'))
+        statistics = self._get_multi_param("Statistics.member")
+
+        # Unsupported Parameters (To Be Implemented)
+        unit = self._get_param('Unit')
+        extended_statistics = self._get_param('ExtendedStatistics')
+        dimensions = self._get_param('Dimensions')
+        if unit or extended_statistics or dimensions:
+            raise NotImplementedError()
+
+        # TODO: this should instead throw InvalidParameterCombination
+        if not statistics:
+            raise NotImplementedError("Must specify either Statistics or ExtendedStatistics")
+
+        datapoints = self.cloudwatch_backend.get_metric_statistics(namespace, metric_name, start_time, end_time, period, statistics)
+        template = self.response_template(GET_METRIC_STATISTICS_TEMPLATE)
+        return template.render(label=metric_name, datapoints=datapoints)
+
     @amzn_request_id
     def list_metrics(self):
         metrics = self.cloudwatch_backend.get_all_metrics()
@@ -150,10 +152,6 @@ class CloudWatchResponse(BaseResponse):
         template = self.response_template(GET_DASHBOARD_TEMPLATE)
         return template.render(dashboard=dashboard)

-    @amzn_request_id
-    def get_metric_statistics(self):
-        raise NotImplementedError()
-
     @amzn_request_id
     def list_dashboards(self):
         prefix = self._get_param('DashboardNamePrefix', '')
@@ -266,6 +264,50 @@ PUT_METRIC_DATA_TEMPLATE = """<PutMetricDataResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
    <ResponseMetadata>
       <RequestId>
          {{ request_id }}
       </RequestId>
    </ResponseMetadata>
 </PutMetricDataResponse>"""

+GET_METRIC_STATISTICS_TEMPLATE = """<GetMetricStatisticsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
+  <ResponseMetadata>
+    <RequestId>
+      {{ request_id }}
+    </RequestId>
+  </ResponseMetadata>
+
+  <GetMetricStatisticsResult>
+    <Label>{{ label }}</Label>
+    <Datapoints>
+      {% for datapoint in datapoints %}
+      <Datapoint>
+        {% if datapoint.sum is not none %}
+        <Sum>{{ datapoint.sum }}</Sum>
+        {% endif %}
+
+        {% if datapoint.average is not none %}
+        <Average>{{ datapoint.average }}</Average>
+        {% endif %}
+
+        {% if datapoint.maximum is not none %}
+        <Maximum>{{ datapoint.maximum }}</Maximum>
+        {% endif %}
+
+        {% if datapoint.minimum is not none %}
+        <Minimum>{{ datapoint.minimum }}</Minimum>
+        {% endif %}
+
+        {% if datapoint.sample_count is not none %}
+        <SampleCount>{{ datapoint.sample_count }}</SampleCount>
+        {% endif %}
+
+        {% if datapoint.extended_statistics is not none %}
+        <ExtendedStatistics>{{ datapoint.extended_statistics }}</ExtendedStatistics>
+        {% endif %}
+
+        <Timestamp>{{ datapoint.timestamp }}</Timestamp>
+        <Unit>{{ datapoint.unit }}</Unit>
+      </Datapoint>
+      {% endfor %}
+    </Datapoints>
+  </GetMetricStatisticsResult>
+</GetMetricStatisticsResponse>"""
+
 LIST_METRICS_TEMPLATE = """<ListMetricsResponse xmlns="http://monitoring.amazonaws.com/doc/2010-08-01/">
diff --git a/moto/cognitoidentity/__init__.py b/moto/cognitoidentity/__init__.py
new file mode 100644
index 000000000..2f040fa19
--- /dev/null
+++ b/moto/cognitoidentity/__init__.py
@@ -0,0 +1,7 @@
+from __future__ import unicode_literals
+from .models import cognitoidentity_backends
+from ..core.models import base_decorator, deprecated_base_decorator
+
+cognitoidentity_backend = cognitoidentity_backends['us-east-1']
+mock_cognitoidentity = base_decorator(cognitoidentity_backends)
+mock_cognitoidentity_deprecated = deprecated_base_decorator(cognitoidentity_backends)
diff --git a/moto/cognitoidentity/models.py b/moto/cognitoidentity/models.py
new file mode 100644
index 000000000..c916b7f62
--- /dev/null
+++ b/moto/cognitoidentity/models.py
@@ -0,0 +1,110 @@
+from __future__ import unicode_literals
+
+import datetime
+import json
+
+import boto.cognito.identity
+
+from moto.compat import OrderedDict
+from moto.core import BaseBackend, BaseModel
+from moto.core.utils import iso_8601_datetime_with_milliseconds
+
+from .utils import get_random_identity_id
+
+
+class CognitoIdentity(BaseModel):
+
+    def __init__(self, region, identity_pool_name, **kwargs):
+        self.identity_pool_name = identity_pool_name
+        self.allow_unauthenticated_identities = kwargs.get('allow_unauthenticated_identities', '')
+        self.supported_login_providers = kwargs.get('supported_login_providers', {})
+        self.developer_provider_name = kwargs.get('developer_provider_name', '')
+        self.open_id_connect_provider_arns = kwargs.get('open_id_connect_provider_arns', [])
+
self.cognito_identity_providers = kwargs.get('cognito_identity_providers', []) + self.saml_provider_arns = kwargs.get('saml_provider_arns', []) + + self.identity_pool_id = get_random_identity_id(region) + self.creation_time = datetime.datetime.utcnow() + + +class CognitoIdentityBackend(BaseBackend): + + def __init__(self, region): + super(CognitoIdentityBackend, self).__init__() + self.region = region + self.identity_pools = OrderedDict() + + def reset(self): + region = self.region + self.__dict__ = {} + self.__init__(region) + + def create_identity_pool(self, identity_pool_name, allow_unauthenticated_identities, + supported_login_providers, developer_provider_name, open_id_connect_provider_arns, + cognito_identity_providers, saml_provider_arns): + + new_identity = CognitoIdentity(self.region, identity_pool_name, + allow_unauthenticated_identities=allow_unauthenticated_identities, + supported_login_providers=supported_login_providers, + developer_provider_name=developer_provider_name, + open_id_connect_provider_arns=open_id_connect_provider_arns, + cognito_identity_providers=cognito_identity_providers, + saml_provider_arns=saml_provider_arns) + self.identity_pools[new_identity.identity_pool_id] = new_identity + + response = json.dumps({ + 'IdentityPoolId': new_identity.identity_pool_id, + 'IdentityPoolName': new_identity.identity_pool_name, + 'AllowUnauthenticatedIdentities': new_identity.allow_unauthenticated_identities, + 'SupportedLoginProviders': new_identity.supported_login_providers, + 'DeveloperProviderName': new_identity.developer_provider_name, + 'OpenIdConnectProviderARNs': new_identity.open_id_connect_provider_arns, + 'CognitoIdentityProviders': new_identity.cognito_identity_providers, + 'SamlProviderARNs': new_identity.saml_provider_arns + }) + + return response + + def get_id(self): + identity_id = {'IdentityId': get_random_identity_id(self.region)} + return json.dumps(identity_id) + + def get_credentials_for_identity(self, identity_id): + duration = 90 + now = datetime.datetime.utcnow() + expiration = now + datetime.timedelta(seconds=duration) + expiration_str = str(iso_8601_datetime_with_milliseconds(expiration)) + response = json.dumps( + { + "Credentials": + { + "AccessKeyId": "TESTACCESSKEY12345", + "Expiration": expiration_str, + "SecretKey": "ABCSECRETKEY", + "SessionToken": "ABC12345" + }, + "IdentityId": identity_id + }) + return response + + def get_open_id_token_for_developer_identity(self, identity_id): + response = json.dumps( + { + "IdentityId": identity_id, + "Token": get_random_identity_id(self.region) + }) + return response + + def get_open_id_token(self, identity_id): + response = json.dumps( + { + "IdentityId": identity_id, + "Token": get_random_identity_id(self.region) + } + ) + return response + + +cognitoidentity_backends = {} +for region in boto.cognito.identity.regions(): + cognitoidentity_backends[region.name] = CognitoIdentityBackend(region.name) diff --git a/moto/cognitoidentity/responses.py b/moto/cognitoidentity/responses.py new file mode 100644 index 000000000..33faaa300 --- /dev/null +++ b/moto/cognitoidentity/responses.py @@ -0,0 +1,42 @@ +from __future__ import unicode_literals + +from moto.core.responses import BaseResponse + +from .models import cognitoidentity_backends +from .utils import get_random_identity_id + + +class CognitoIdentityResponse(BaseResponse): + + def create_identity_pool(self): + identity_pool_name = self._get_param('IdentityPoolName') + allow_unauthenticated_identities = self._get_param('AllowUnauthenticatedIdentities') + 
supported_login_providers = self._get_param('SupportedLoginProviders') + developer_provider_name = self._get_param('DeveloperProviderName') + open_id_connect_provider_arns = self._get_param('OpenIdConnectProviderARNs') + cognito_identity_providers = self._get_param('CognitoIdentityProviders') + saml_provider_arns = self._get_param('SamlProviderARNs') + return cognitoidentity_backends[self.region].create_identity_pool( + identity_pool_name=identity_pool_name, + allow_unauthenticated_identities=allow_unauthenticated_identities, + supported_login_providers=supported_login_providers, + developer_provider_name=developer_provider_name, + open_id_connect_provider_arns=open_id_connect_provider_arns, + cognito_identity_providers=cognito_identity_providers, + saml_provider_arns=saml_provider_arns) + + def get_id(self): + return cognitoidentity_backends[self.region].get_id() + + def get_credentials_for_identity(self): + return cognitoidentity_backends[self.region].get_credentials_for_identity(self._get_param('IdentityId')) + + def get_open_id_token_for_developer_identity(self): + return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity( + self._get_param('IdentityId') or get_random_identity_id(self.region) + ) + + def get_open_id_token(self): + return cognitoidentity_backends[self.region].get_open_id_token( + self._get_param("IdentityId") or get_random_identity_id(self.region) + ) diff --git a/moto/cognitoidentity/urls.py b/moto/cognitoidentity/urls.py new file mode 100644 index 000000000..3fe63ef07 --- /dev/null +++ b/moto/cognitoidentity/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import CognitoIdentityResponse + +url_bases = [ + "https?://cognito-identity.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': CognitoIdentityResponse.dispatch, +} diff --git a/moto/cognitoidentity/utils.py b/moto/cognitoidentity/utils.py new file mode 100644 index 000000000..6143d5121 --- /dev/null +++ b/moto/cognitoidentity/utils.py @@ -0,0 +1,5 @@ +from moto.core.utils import get_random_hex + + +def get_random_identity_id(region): + return "{0}:{1}".format(region, get_random_hex(length=19)) diff --git a/moto/cognitoidp/__init__.py b/moto/cognitoidp/__init__.py new file mode 100644 index 000000000..676e2dd77 --- /dev/null +++ b/moto/cognitoidp/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import cognitoidp_backends +from ..core.models import base_decorator, deprecated_base_decorator + +mock_cognitoidp = base_decorator(cognitoidp_backends) +mock_cognitoidp_deprecated = deprecated_base_decorator(cognitoidp_backends) diff --git a/moto/cognitoidp/exceptions.py b/moto/cognitoidp/exceptions.py new file mode 100644 index 000000000..452670213 --- /dev/null +++ b/moto/cognitoidp/exceptions.py @@ -0,0 +1,44 @@ +from __future__ import unicode_literals + +import json +from werkzeug.exceptions import BadRequest + + +class ResourceNotFoundError(BadRequest): + + def __init__(self, message): + super(ResourceNotFoundError, self).__init__() + self.description = json.dumps({ + "message": message, + '__type': 'ResourceNotFoundException', + }) + + +class UserNotFoundError(BadRequest): + + def __init__(self, message): + super(UserNotFoundError, self).__init__() + self.description = json.dumps({ + "message": message, + '__type': 'UserNotFoundException', + }) + + +class GroupExistsException(BadRequest): + + def __init__(self, message): + super(GroupExistsException, self).__init__() + self.description = json.dumps({ + "message": message, + 
'__type': 'GroupExistsException',
+        })
+
+
+class NotAuthorizedError(BadRequest):
+
+    def __init__(self, message):
+        super(NotAuthorizedError, self).__init__()
+        self.description = json.dumps({
+            "message": message,
+            '__type': 'NotAuthorizedException',
+        })
diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py
new file mode 100644
index 000000000..2c82367c6
--- /dev/null
+++ b/moto/cognitoidp/models.py
@@ -0,0 +1,746 @@
+from __future__ import unicode_literals
+
+import datetime
+import functools
+import hashlib
+import itertools
+import json
+import os
+import time
+import uuid
+
+import boto.cognito.identity
+from jose import jws
+
+from moto.compat import OrderedDict
+from moto.core import BaseBackend, BaseModel
+from .exceptions import GroupExistsException, NotAuthorizedError, ResourceNotFoundError, UserNotFoundError
+
+UserStatus = {
+    "FORCE_CHANGE_PASSWORD": "FORCE_CHANGE_PASSWORD",
+    "CONFIRMED": "CONFIRMED",
+}
+
+
+def paginate(limit, start_arg="next_token", limit_arg="max_results"):
+    """Returns a limited result list, and an offset into the list of remaining items
+
+    Takes the `next_token` and `max_results` kwargs given to a function and handles
+    the slicing of the results. The kwarg `next_token` is the offset into the
+    list to begin slicing from; `max_results` is the size of the result required.
+
+    If `max_results` is not supplied then the `limit` parameter is used as a
+    default.
+
+    :param limit_arg: the name of the argument in the decorated function that
+        controls the number of items returned
+    :param start_arg: the name of the argument in the decorated function that
+        provides the starting offset
+    :param limit: a default maximum number of items to return
+    :return: a tuple containing a list of items, and the offset into the list
+    """
+    default_start = 0
+
+    def outer_wrapper(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            start = int(default_start if kwargs.get(start_arg) is None else kwargs[start_arg])
+            lim = int(limit if kwargs.get(limit_arg) is None else kwargs[limit_arg])
+            stop = start + lim
+            result = func(*args, **kwargs)
+            limited_results = list(itertools.islice(result, start, stop))
+            next_token = stop if stop < len(result) else None
+            return limited_results, next_token
+        return wrapper
+    return outer_wrapper
+
+
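The slicing contract is easiest to see on a toy function; the function below is invented for the example, only the decorator comes from this module:

    @paginate(limit=2)
    def list_widgets(next_token=None, max_results=None):
        return ["a", "b", "c", "d", "e"]

    list_widgets()              # (["a", "b"], 2)
    list_widgets(next_token=2)  # (["c", "d"], 4)
    list_widgets(next_token=4)  # (["e"], None) -- no further pages
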
+ + +class CognitoIdpUserPool(BaseModel): + + def __init__(self, region, name, extended_config): + self.region = region + self.id = "{}_{}".format(self.region, str(uuid.uuid4().hex)) + self.name = name + self.status = None + self.extended_config = extended_config or {} + self.creation_date = datetime.datetime.utcnow() + self.last_modified_date = datetime.datetime.utcnow() + + self.clients = OrderedDict() + self.identity_providers = OrderedDict() + self.groups = OrderedDict() + self.users = OrderedDict() + self.refresh_tokens = {} + self.access_tokens = {} + self.id_tokens = {} + + with open(os.path.join(os.path.dirname(__file__), "resources/jwks-private.json")) as f: + self.json_web_key = json.loads(f.read()) + + def _base_json(self): + return { + "Id": self.id, + "Name": self.name, + "Status": self.status, + "CreationDate": time.mktime(self.creation_date.timetuple()), + "LastModifiedDate": time.mktime(self.last_modified_date.timetuple()), + } + + def to_json(self, extended=False): + user_pool_json = self._base_json() + if extended: + user_pool_json.update(self.extended_config) + else: + user_pool_json["LambdaConfig"] = self.extended_config.get("LambdaConfig") or {} + + return user_pool_json + + def create_jwt(self, client_id, username, expires_in=60 * 60, extra_data=None): + now = int(time.time()) + payload = { + "iss": "https://cognito-idp.{}.amazonaws.com/{}".format(self.region, self.id), + "sub": self.users[username].id, + "aud": client_id, + "token_use": "id", + "auth_time": now, + "exp": now + expires_in, + } + payload.update(extra_data or {}) + + return jws.sign(payload, self.json_web_key, algorithm='RS256'), expires_in + + def create_id_token(self, client_id, username): + id_token, expires_in = self.create_jwt(client_id, username) + self.id_tokens[id_token] = (client_id, username) + return id_token, expires_in + + def create_refresh_token(self, client_id, username): + refresh_token = str(uuid.uuid4()) + self.refresh_tokens[refresh_token] = (client_id, username) + return refresh_token + + def create_access_token(self, client_id, username): + extra_data = self.get_user_extra_data_by_client_id( + client_id, username + ) + access_token, expires_in = self.create_jwt(client_id, username, + extra_data=extra_data) + self.access_tokens[access_token] = (client_id, username) + return access_token, expires_in + + def create_tokens_from_refresh_token(self, refresh_token): + token_pair = self.refresh_tokens.get(refresh_token) + if not token_pair: + raise NotAuthorizedError(refresh_token) + client_id, username = token_pair + + access_token, expires_in = self.create_access_token(client_id, username) + id_token, _ = self.create_id_token(client_id, username) + return access_token, id_token, expires_in + + def get_user_extra_data_by_client_id(self, client_id, username): + extra_data = {} + current_client = self.clients.get(client_id, None) + if current_client: + for readable_field in current_client.get_readable_fields(): + attribute = list(filter( + lambda f: f['Name'] == readable_field, + self.users.get(username).attributes + )) + if len(attribute) > 0: + extra_data.update({ + attribute[0]['Name']: attribute[0]['Value'] + }) + return extra_data + + +class CognitoIdpUserPoolDomain(BaseModel): + + def __init__(self, user_pool_id, domain, custom_domain_config=None): + self.user_pool_id = user_pool_id + self.domain = domain + self.custom_domain_config = custom_domain_config or {} + + def _distribution_name(self): + if self.custom_domain_config and \ + 'CertificateArn' in self.custom_domain_config: + hash = hashlib.md5( + self.custom_domain_config['CertificateArn'].encode('utf-8') + ).hexdigest() + return "{hash}.cloudfront.net".format(hash=hash[:16]) + return None + + def to_json(self, extended=True): + distribution = self._distribution_name() + if extended: + return { + "UserPoolId": self.user_pool_id, + "AWSAccountId": str(uuid.uuid4()), + "CloudFrontDistribution": distribution, + "Domain": self.domain, + "S3Bucket": None, + "Status": "ACTIVE", + "Version": None, + } + elif distribution: + return { + "CloudFrontDomain": distribution, + } + return None + + +class CognitoIdpUserPoolClient(BaseModel): + + def __init__(self, user_pool_id, extended_config): + self.user_pool_id = user_pool_id + self.id = str(uuid.uuid4()) + self.secret = str(uuid.uuid4()) + self.extended_config = extended_config or {} + + def _base_json(self): + return { + "ClientId": self.id, + "ClientName": self.extended_config.get("ClientName"), + "UserPoolId": self.user_pool_id, + } + + def to_json(self, extended=False): + user_pool_client_json = self._base_json() + if extended: + user_pool_client_json.update(self.extended_config) + + return user_pool_client_json + + def get_readable_fields(self): + return self.extended_config.get('ReadAttributes', []) + + +class CognitoIdpIdentityProvider(BaseModel): + + def
__init__(self, name, extended_config): + self.name = name + self.extended_config = extended_config or {} + self.creation_date = datetime.datetime.utcnow() + self.last_modified_date = datetime.datetime.utcnow() + + def _base_json(self): + return { + "ProviderName": self.name, + "ProviderType": self.extended_config.get("ProviderType"), + "CreationDate": time.mktime(self.creation_date.timetuple()), + "LastModifiedDate": time.mktime(self.last_modified_date.timetuple()), + } + + def to_json(self, extended=False): + identity_provider_json = self._base_json() + if extended: + identity_provider_json.update(self.extended_config) + + return identity_provider_json + + +class CognitoIdpGroup(BaseModel): + + def __init__(self, user_pool_id, group_name, description, role_arn, precedence): + self.user_pool_id = user_pool_id + self.group_name = group_name + self.description = description or "" + self.role_arn = role_arn + self.precedence = precedence + self.last_modified_date = datetime.datetime.now() + self.creation_date = self.last_modified_date + + # Users who are members of this group. + # Note that these links are bidirectional. + self.users = set() + + def to_json(self): + return { + "GroupName": self.group_name, + "UserPoolId": self.user_pool_id, + "Description": self.description, + "RoleArn": self.role_arn, + "Precedence": self.precedence, + "LastModifiedDate": time.mktime(self.last_modified_date.timetuple()), + "CreationDate": time.mktime(self.creation_date.timetuple()), + } + + +class CognitoIdpUser(BaseModel): + + def __init__(self, user_pool_id, username, password, status, attributes): + self.id = str(uuid.uuid4()) + self.user_pool_id = user_pool_id + self.username = username + self.password = password + self.status = status + self.enabled = True + self.attributes = attributes + self.create_date = datetime.datetime.utcnow() + self.last_modified_date = datetime.datetime.utcnow() + + # Groups this user is a member of. + # Note that these links are bidirectional. + self.groups = set() + + def _base_json(self): + return { + "UserPoolId": self.user_pool_id, + "Username": self.username, + "UserStatus": self.status, + "UserCreateDate": time.mktime(self.create_date.timetuple()), + "UserLastModifiedDate": time.mktime(self.last_modified_date.timetuple()), + } + + # list_users brings back "Attributes" while admin_get_user brings back "UserAttributes". 
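Concretely, the asymmetry noted in the comment above looks like this when driven through boto3 (a hedged sketch: it assumes mock_cognitoidp is exported from the top-level moto package like the other service decorators, and the pool, user, and attribute names are illustrative):

    import boto3
    from moto import mock_cognitoidp

    @mock_cognitoidp
    def show_attribute_keys():
        conn = boto3.client("cognito-idp", region_name="us-west-2")
        pool_id = conn.create_user_pool(PoolName="example-pool")["UserPool"]["Id"]
        conn.admin_create_user(
            UserPoolId=pool_id,
            Username="alice",
            TemporaryPassword="P@ssw0rd!",
            UserAttributes=[{"Name": "email", "Value": "alice@example.com"}],
        )
        # list_users nests each user's attributes under "Attributes" ...
        listed = conn.list_users(UserPoolId=pool_id)["Users"][0]
        assert "Attributes" in listed
        # ... while admin_get_user returns the same data as "UserAttributes".
        fetched = conn.admin_get_user(UserPoolId=pool_id, Username="alice")
        assert "UserAttributes" in fetched

    show_attribute_keys()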
+ def to_json(self, extended=False, attributes_key="Attributes"): + user_json = self._base_json() + if extended: + user_json.update( + { + "Enabled": self.enabled, + attributes_key: self.attributes, + "MFAOptions": [] + } + ) + + return user_json + + def update_attributes(self, new_attributes): + + def flatten_attrs(attrs): + return {attr['Name']: attr['Value'] for attr in attrs} + + def expand_attrs(attrs): + return [{'Name': k, 'Value': v} for k, v in attrs.items()] + + flat_attributes = flatten_attrs(self.attributes) + flat_attributes.update(flatten_attrs(new_attributes)) + self.attributes = expand_attrs(flat_attributes) + + +class CognitoIdpBackend(BaseBackend): + + def __init__(self, region): + super(CognitoIdpBackend, self).__init__() + self.region = region + self.user_pools = OrderedDict() + self.user_pool_domains = OrderedDict() + self.sessions = {} + + def reset(self): + region = self.region + self.__dict__ = {} + self.__init__(region) + + # User pool + def create_user_pool(self, name, extended_config): + user_pool = CognitoIdpUserPool(self.region, name, extended_config) + self.user_pools[user_pool.id] = user_pool + return user_pool + + @paginate(60) + def list_user_pools(self, max_results=None, next_token=None): + return self.user_pools.values() + + def describe_user_pool(self, user_pool_id): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + return user_pool + + def delete_user_pool(self, user_pool_id): + if user_pool_id not in self.user_pools: + raise ResourceNotFoundError(user_pool_id) + + del self.user_pools[user_pool_id] + + # User pool domain + def create_user_pool_domain(self, user_pool_id, domain, custom_domain_config=None): + if user_pool_id not in self.user_pools: + raise ResourceNotFoundError(user_pool_id) + + user_pool_domain = CognitoIdpUserPoolDomain( + user_pool_id, domain, custom_domain_config=custom_domain_config + ) + self.user_pool_domains[domain] = user_pool_domain + return user_pool_domain + + def describe_user_pool_domain(self, domain): + if domain not in self.user_pool_domains: + return None + + return self.user_pool_domains[domain] + + def delete_user_pool_domain(self, domain): + if domain not in self.user_pool_domains: + raise ResourceNotFoundError(domain) + + del self.user_pool_domains[domain] + + def update_user_pool_domain(self, domain, custom_domain_config): + if domain not in self.user_pool_domains: + raise ResourceNotFoundError(domain) + + user_pool_domain = self.user_pool_domains[domain] + user_pool_domain.custom_domain_config = custom_domain_config + return user_pool_domain + + # User pool client + def create_user_pool_client(self, user_pool_id, extended_config): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + user_pool_client = CognitoIdpUserPoolClient(user_pool_id, extended_config) + user_pool.clients[user_pool_client.id] = user_pool_client + return user_pool_client + + @paginate(60) + def list_user_pool_clients(self, user_pool_id, max_results=None, next_token=None): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + return user_pool.clients.values() + + def describe_user_pool_client(self, user_pool_id, client_id): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + client = user_pool.clients.get(client_id) + if not client: + raise ResourceNotFoundError(client_id) + + return client + + 
def update_user_pool_client(self, user_pool_id, client_id, extended_config): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + client = user_pool.clients.get(client_id) + if not client: + raise ResourceNotFoundError(client_id) + + client.extended_config.update(extended_config) + return client + + def delete_user_pool_client(self, user_pool_id, client_id): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + if client_id not in user_pool.clients: + raise ResourceNotFoundError(client_id) + + del user_pool.clients[client_id] + + # Identity provider + def create_identity_provider(self, user_pool_id, name, extended_config): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + identity_provider = CognitoIdpIdentityProvider(name, extended_config) + user_pool.identity_providers[name] = identity_provider + return identity_provider + + @paginate(60) + def list_identity_providers(self, user_pool_id, max_results=None, next_token=None): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + return user_pool.identity_providers.values() + + def describe_identity_provider(self, user_pool_id, name): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + identity_provider = user_pool.identity_providers.get(name) + if not identity_provider: + raise ResourceNotFoundError(name) + + return identity_provider + + def update_identity_provider(self, user_pool_id, name, extended_config): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + identity_provider = user_pool.identity_providers.get(name) + if not identity_provider: + raise ResourceNotFoundError(name) + + identity_provider.extended_config.update(extended_config) + + return identity_provider + + def delete_identity_provider(self, user_pool_id, name): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + if name not in user_pool.identity_providers: + raise ResourceNotFoundError(name) + + del user_pool.identity_providers[name] + + # Group + def create_group(self, user_pool_id, group_name, description, role_arn, precedence): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + group = CognitoIdpGroup(user_pool_id, group_name, description, role_arn, precedence) + if group.group_name in user_pool.groups: + raise GroupExistsException("A group with the name already exists") + user_pool.groups[group.group_name] = group + + return group + + def get_group(self, user_pool_id, group_name): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + if group_name not in user_pool.groups: + raise ResourceNotFoundError(group_name) + + return user_pool.groups[group_name] + + def list_groups(self, user_pool_id): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + return user_pool.groups.values() + + def delete_group(self, user_pool_id, group_name): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + if group_name not in user_pool.groups: + raise ResourceNotFoundError(group_name) + + group = 
user_pool.groups[group_name] + for user in group.users: + user.groups.remove(group) + + del user_pool.groups[group_name] + + def admin_add_user_to_group(self, user_pool_id, group_name, username): + group = self.get_group(user_pool_id, group_name) + user = self.admin_get_user(user_pool_id, username) + + group.users.add(user) + user.groups.add(group) + + def list_users_in_group(self, user_pool_id, group_name): + group = self.get_group(user_pool_id, group_name) + return list(group.users) + + def admin_list_groups_for_user(self, user_pool_id, username): + user = self.admin_get_user(user_pool_id, username) + return list(user.groups) + + def admin_remove_user_from_group(self, user_pool_id, group_name, username): + group = self.get_group(user_pool_id, group_name) + user = self.admin_get_user(user_pool_id, username) + + group.users.discard(user) + user.groups.discard(group) + + # User + def admin_create_user(self, user_pool_id, username, temporary_password, attributes): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + user = CognitoIdpUser(user_pool_id, username, temporary_password, UserStatus["FORCE_CHANGE_PASSWORD"], attributes) + user_pool.users[user.username] = user + return user + + def admin_get_user(self, user_pool_id, username): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + if username not in user_pool.users: + raise UserNotFoundError(username) + + return user_pool.users[username] + + @paginate(60, "pagination_token", "limit") + def list_users(self, user_pool_id, pagination_token=None, limit=None): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + return user_pool.users.values() + + def admin_disable_user(self, user_pool_id, username): + user = self.admin_get_user(user_pool_id, username) + user.enabled = False + + def admin_enable_user(self, user_pool_id, username): + user = self.admin_get_user(user_pool_id, username) + user.enabled = True + + def admin_delete_user(self, user_pool_id, username): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + if username not in user_pool.users: + raise UserNotFoundError(username) + + user = user_pool.users[username] + for group in user.groups: + group.users.remove(user) + + del user_pool.users[username] + + def _log_user_in(self, user_pool, client, username): + refresh_token = user_pool.create_refresh_token(client.id, username) + access_token, id_token, expires_in = user_pool.create_tokens_from_refresh_token(refresh_token) + + return { + "AuthenticationResult": { + "IdToken": id_token, + "AccessToken": access_token, + "RefreshToken": refresh_token, + "ExpiresIn": expires_in, + } + } + + def admin_initiate_auth(self, user_pool_id, client_id, auth_flow, auth_parameters): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + client = user_pool.clients.get(client_id) + if not client: + raise ResourceNotFoundError(client_id) + + if auth_flow == "ADMIN_NO_SRP_AUTH": + username = auth_parameters.get("USERNAME") + password = auth_parameters.get("PASSWORD") + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + if user.password != password: + raise NotAuthorizedError(username) + + if user.status == UserStatus["FORCE_CHANGE_PASSWORD"]: + session = str(uuid.uuid4()) + self.sessions[session] = user_pool 
+ + return { + "ChallengeName": "NEW_PASSWORD_REQUIRED", + "ChallengeParameters": {}, + "Session": session, + } + + return self._log_user_in(user_pool, client, username) + elif auth_flow == "REFRESH_TOKEN": + refresh_token = auth_parameters.get("REFRESH_TOKEN") + access_token, id_token, expires_in = user_pool.create_tokens_from_refresh_token(refresh_token) + + return { + "AuthenticationResult": { + "IdToken": id_token, + "AccessToken": access_token, + "ExpiresIn": expires_in, + } + } + else: + return {} + + def respond_to_auth_challenge(self, session, client_id, challenge_name, challenge_responses): + user_pool = self.sessions.get(session) + if not user_pool: + raise ResourceNotFoundError(session) + + client = user_pool.clients.get(client_id) + if not client: + raise ResourceNotFoundError(client_id) + + if challenge_name == "NEW_PASSWORD_REQUIRED": + username = challenge_responses.get("USERNAME") + new_password = challenge_responses.get("NEW_PASSWORD") + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + user.password = new_password + user.status = UserStatus["CONFIRMED"] + del self.sessions[session] + + return self._log_user_in(user_pool, client, username) + else: + return {} + + def confirm_forgot_password(self, client_id, username, password): + for user_pool in self.user_pools.values(): + if client_id in user_pool.clients and username in user_pool.users: + user_pool.users[username].password = password + break + else: + raise ResourceNotFoundError(client_id) + + def change_password(self, access_token, previous_password, proposed_password): + for user_pool in self.user_pools.values(): + if access_token in user_pool.access_tokens: + _, username = user_pool.access_tokens[access_token] + user = user_pool.users.get(username) + if not user: + raise UserNotFoundError(username) + + if user.password != previous_password: + raise NotAuthorizedError(username) + + user.password = proposed_password + if user.status == UserStatus["FORCE_CHANGE_PASSWORD"]: + user.status = UserStatus["CONFIRMED"] + + break + else: + raise NotAuthorizedError(access_token) + + def admin_update_user_attributes(self, user_pool_id, username, attributes): + user_pool = self.user_pools.get(user_pool_id) + if not user_pool: + raise ResourceNotFoundError(user_pool_id) + + if username not in user_pool.users: + raise UserNotFoundError(username) + + user = user_pool.users[username] + user.update_attributes(attributes) + + +cognitoidp_backends = {} +for region in boto.cognito.identity.regions(): + cognitoidp_backends[region.name] = CognitoIdpBackend(region.name)
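With the backend registered per region, the challenge flow implemented above can be exercised end to end. A hedged sketch through boto3 (again assuming the top-level mock_cognitoidp export; all names are placeholders):

    import boto3
    from moto import mock_cognitoidp

    @mock_cognitoidp
    def new_password_flow():
        conn = boto3.client("cognito-idp", region_name="us-west-2")
        pool_id = conn.create_user_pool(PoolName="example-pool")["UserPool"]["Id"]
        client_id = conn.create_user_pool_client(
            UserPoolId=pool_id, ClientName="example-client"
        )["UserPoolClient"]["ClientId"]
        conn.admin_create_user(
            UserPoolId=pool_id, Username="alice", TemporaryPassword="temporary-pass"
        )

        # A fresh user sits in FORCE_CHANGE_PASSWORD, so the first auth
        # attempt returns a challenge instead of tokens.
        result = conn.admin_initiate_auth(
            UserPoolId=pool_id,
            ClientId=client_id,
            AuthFlow="ADMIN_NO_SRP_AUTH",
            AuthParameters={"USERNAME": "alice", "PASSWORD": "temporary-pass"},
        )
        assert result["ChallengeName"] == "NEW_PASSWORD_REQUIRED"

        # Answering the challenge confirms the user and yields real tokens.
        tokens = conn.respond_to_auth_challenge(
            ClientId=client_id,
            ChallengeName="NEW_PASSWORD_REQUIRED",
            Session=result["Session"],
            ChallengeResponses={"USERNAME": "alice", "NEW_PASSWORD": "brand-new-pass"},
        )
        assert "AccessToken" in tokens["AuthenticationResult"]

    new_password_flow()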
+ + +# Hack to help moto-server process requests on localhost, where the region isn't +# specified in the host header. Some endpoints (change password, confirm forgot +# password) have no authorization header from which to extract the region. +def find_region_by_value(key, value): + for region in cognitoidp_backends: + backend = cognitoidp_backends[region] + for user_pool in backend.user_pools.values(): + if key == "client_id" and value in user_pool.clients: + return region + + if key == "access_token" and value in user_pool.access_tokens: + return region + + return list(cognitoidp_backends)[0] diff --git a/moto/cognitoidp/resources/jwks-private.json b/moto/cognitoidp/resources/jwks-private.json new file mode 100644 index 000000000..8dde9c336 --- /dev/null +++ b/moto/cognitoidp/resources/jwks-private.json @@ -0,0 +1,9 @@ +{ + "alg": "RS256", + "d": "DrrLT2qMERN0Id-bNglOe6SVkUNF3MTIzrH-TVkMZhsHk8kyqiqt-8JbLQMh2gOgTIjpu93b2_UREGA0BGdWs34hv0v7Gx8uIngCY6e6XO8LDemOo-2VHZHl5Ew-lrRYhwq12c_c4mfavAdMzXHODrpXSnqLnbFK88S-3fu6Da4czc4Svo4v8MkGZk_fcTml3Y1jIFHxbbTWka37j4NLpAzdfvX--J086m-LbZ8CJL_lGMKbAKsWURMmzCFL9ZFH9JdzX79KeDOH0GrzGwS_cOsZHsCamF_CWrtG4asPt-SHyn_k0X4JJJgAWVA674VCqorMAPDVYIzKJOUMImmsEQ", + "e": "AQAB", + "kid": "dummy", + "kty": "RSA", + "n": "j1pT3xKbswmMySvCefmiD3mfDaRFpZ9Y3Jl4fF0hMaCRVAt_e0yR7BeueDfqmj_NhVSO0WB5ao5e8V-9RFQOtK8SrqKl3i01-CyWYPICwybaGKhbJJR0S_6cZ8n5kscF1MjpIlsJcCzm-yKgTc3Mxk6KtrLoNgRvMwGLeHUXPkhS9YHfDKRe864iMFOK4df69brIYEICG2VLduh0hXYa0i-J3drwm7vxNdX7pVpCDu34qJtYoWq6CXt3Tzfi3YfWp8cFjGNbaDa3WnCd2IXpp0TFsFS-cEsw5rJjSl5OllJGeZKBtLeyVTy9PYwnk7MW43WSYeYstbk9NluX4H8Iuw", + "use": "sig" +} diff --git a/moto/cognitoidp/resources/jwks-public.json b/moto/cognitoidp/resources/jwks-public.json new file mode 100644 index 000000000..a5309c7f5 --- /dev/null +++ b/moto/cognitoidp/resources/jwks-public.json @@ -0,0 +1,12 @@ +{ + "keys": [ + { + "alg": "RS256", + "e": "AQAB", + "kid": "dummy", + "kty": "RSA", + "n": "j1pT3xKbswmMySvCefmiD3mfDaRFpZ9Y3Jl4fF0hMaCRVAt_e0yR7BeueDfqmj_NhVSO0WB5ao5e8V-9RFQOtK8SrqKl3i01-CyWYPICwybaGKhbJJR0S_6cZ8n5kscF1MjpIlsJcCzm-yKgTc3Mxk6KtrLoNgRvMwGLeHUXPkhS9YHfDKRe864iMFOK4df69brIYEICG2VLduh0hXYa0i-J3drwm7vxNdX7pVpCDu34qJtYoWq6CXt3Tzfi3YfWp8cFjGNbaDa3WnCd2IXpp0TFsFS-cEsw5rJjSl5OllJGeZKBtLeyVTy9PYwnk7MW43WSYeYstbk9NluX4H8Iuw", + "use": "sig" + } + ] +} diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py new file mode 100644 index 000000000..75dd8c181 --- /dev/null +++ b/moto/cognitoidp/responses.py @@ -0,0 +1,387 @@ +from __future__ import unicode_literals + +import json +import os + +from moto.core.responses import BaseResponse +from .models import cognitoidp_backends, find_region_by_value + + +class CognitoIdpResponse(BaseResponse): + + @property + def parameters(self): + return json.loads(self.body) + + # User pool + def create_user_pool(self): + name = self.parameters.pop("PoolName") + user_pool = cognitoidp_backends[self.region].create_user_pool(name, self.parameters) + return json.dumps({ + "UserPool": user_pool.to_json(extended=True) + }) + + def list_user_pools(self): + max_results = self._get_param("MaxResults") + next_token = self._get_param("NextToken", "0") + user_pools, next_token = cognitoidp_backends[self.region].list_user_pools( + max_results=max_results, next_token=next_token + ) + response = { + "UserPools": [user_pool.to_json() for user_pool in user_pools], + } + if next_token: + response["NextToken"] = str(next_token) + return json.dumps(response) + + def describe_user_pool(self): + user_pool_id = self._get_param("UserPoolId") + user_pool = cognitoidp_backends[self.region].describe_user_pool(user_pool_id) + return json.dumps({ + "UserPool": user_pool.to_json(extended=True) + }) + + def delete_user_pool(self): + user_pool_id = self._get_param("UserPoolId") +
cognitoidp_backends[self.region].delete_user_pool(user_pool_id) + return "" + + # User pool domain + def create_user_pool_domain(self): + domain = self._get_param("Domain") + user_pool_id = self._get_param("UserPoolId") + custom_domain_config = self._get_param("CustomDomainConfig") + user_pool_domain = cognitoidp_backends[self.region].create_user_pool_domain( + user_pool_id, domain, custom_domain_config + ) + domain_description = user_pool_domain.to_json(extended=False) + if domain_description: + return json.dumps(domain_description) + return "" + + def describe_user_pool_domain(self): + domain = self._get_param("Domain") + user_pool_domain = cognitoidp_backends[self.region].describe_user_pool_domain(domain) + domain_description = {} + if user_pool_domain: + domain_description = user_pool_domain.to_json() + + return json.dumps({ + "DomainDescription": domain_description + }) + + def delete_user_pool_domain(self): + domain = self._get_param("Domain") + cognitoidp_backends[self.region].delete_user_pool_domain(domain) + return "" + + def update_user_pool_domain(self): + domain = self._get_param("Domain") + custom_domain_config = self._get_param("CustomDomainConfig") + user_pool_domain = cognitoidp_backends[self.region].update_user_pool_domain( + domain, custom_domain_config + ) + domain_description = user_pool_domain.to_json(extended=False) + if domain_description: + return json.dumps(domain_description) + return "" + + # User pool client + def create_user_pool_client(self): + user_pool_id = self.parameters.pop("UserPoolId") + user_pool_client = cognitoidp_backends[self.region].create_user_pool_client(user_pool_id, self.parameters) + return json.dumps({ + "UserPoolClient": user_pool_client.to_json(extended=True) + }) + + def list_user_pool_clients(self): + user_pool_id = self._get_param("UserPoolId") + max_results = self._get_param("MaxResults") + next_token = self._get_param("NextToken", "0") + user_pool_clients, next_token = cognitoidp_backends[self.region].list_user_pool_clients(user_pool_id, + max_results=max_results, next_token=next_token) + response = { + "UserPoolClients": [user_pool_client.to_json() for user_pool_client in user_pool_clients] + } + if next_token: + response["NextToken"] = str(next_token) + return json.dumps(response) + + def describe_user_pool_client(self): + user_pool_id = self._get_param("UserPoolId") + client_id = self._get_param("ClientId") + user_pool_client = cognitoidp_backends[self.region].describe_user_pool_client(user_pool_id, client_id) + return json.dumps({ + "UserPoolClient": user_pool_client.to_json(extended=True) + }) + + def update_user_pool_client(self): + user_pool_id = self.parameters.pop("UserPoolId") + client_id = self.parameters.pop("ClientId") + user_pool_client = cognitoidp_backends[self.region].update_user_pool_client(user_pool_id, client_id, self.parameters) + return json.dumps({ + "UserPoolClient": user_pool_client.to_json(extended=True) + }) + + def delete_user_pool_client(self): + user_pool_id = self._get_param("UserPoolId") + client_id = self._get_param("ClientId") + cognitoidp_backends[self.region].delete_user_pool_client(user_pool_id, client_id) + return "" + + # Identity provider + def create_identity_provider(self): + user_pool_id = self._get_param("UserPoolId") + name = self.parameters.pop("ProviderName") + identity_provider = cognitoidp_backends[self.region].create_identity_provider(user_pool_id, name, self.parameters) + return json.dumps({ + "IdentityProvider": identity_provider.to_json(extended=True) + }) + + def 
list_identity_providers(self): + user_pool_id = self._get_param("UserPoolId") + max_results = self._get_param("MaxResults") + next_token = self._get_param("NextToken", "0") + identity_providers, next_token = cognitoidp_backends[self.region].list_identity_providers( + user_pool_id, max_results=max_results, next_token=next_token + ) + response = { + "Providers": [identity_provider.to_json() for identity_provider in identity_providers] + } + if next_token: + response["NextToken"] = str(next_token) + return json.dumps(response) + + def describe_identity_provider(self): + user_pool_id = self._get_param("UserPoolId") + name = self._get_param("ProviderName") + identity_provider = cognitoidp_backends[self.region].describe_identity_provider(user_pool_id, name) + return json.dumps({ + "IdentityProvider": identity_provider.to_json(extended=True) + }) + + def update_identity_provider(self): + user_pool_id = self._get_param("UserPoolId") + name = self._get_param("ProviderName") + identity_provider = cognitoidp_backends[self.region].update_identity_provider(user_pool_id, name, self.parameters) + return json.dumps({ + "IdentityProvider": identity_provider.to_json(extended=True) + }) + + def delete_identity_provider(self): + user_pool_id = self._get_param("UserPoolId") + name = self._get_param("ProviderName") + cognitoidp_backends[self.region].delete_identity_provider(user_pool_id, name) + return "" + + # Group + def create_group(self): + group_name = self._get_param("GroupName") + user_pool_id = self._get_param("UserPoolId") + description = self._get_param("Description") + role_arn = self._get_param("RoleArn") + precedence = self._get_param("Precedence") + + group = cognitoidp_backends[self.region].create_group( + user_pool_id, + group_name, + description, + role_arn, + precedence, + ) + + return json.dumps({ + "Group": group.to_json(), + }) + + def get_group(self): + group_name = self._get_param("GroupName") + user_pool_id = self._get_param("UserPoolId") + group = cognitoidp_backends[self.region].get_group(user_pool_id, group_name) + return json.dumps({ + "Group": group.to_json(), + }) + + def list_groups(self): + user_pool_id = self._get_param("UserPoolId") + groups = cognitoidp_backends[self.region].list_groups(user_pool_id) + return json.dumps({ + "Groups": [group.to_json() for group in groups], + }) + + def delete_group(self): + group_name = self._get_param("GroupName") + user_pool_id = self._get_param("UserPoolId") + cognitoidp_backends[self.region].delete_group(user_pool_id, group_name) + return "" + + def admin_add_user_to_group(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + group_name = self._get_param("GroupName") + + cognitoidp_backends[self.region].admin_add_user_to_group( + user_pool_id, + group_name, + username, + ) + + return "" + + def list_users_in_group(self): + user_pool_id = self._get_param("UserPoolId") + group_name = self._get_param("GroupName") + users = cognitoidp_backends[self.region].list_users_in_group(user_pool_id, group_name) + return json.dumps({ + "Users": [user.to_json(extended=True) for user in users], + }) + + def admin_list_groups_for_user(self): + username = self._get_param("Username") + user_pool_id = self._get_param("UserPoolId") + groups = cognitoidp_backends[self.region].admin_list_groups_for_user(user_pool_id, username) + return json.dumps({ + "Groups": [group.to_json() for group in groups], + }) + + def admin_remove_user_from_group(self): + user_pool_id = self._get_param("UserPoolId") + username = 
self._get_param("Username") + group_name = self._get_param("GroupName") + + cognitoidp_backends[self.region].admin_remove_user_from_group( + user_pool_id, + group_name, + username, + ) + + return "" + + # User + def admin_create_user(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + temporary_password = self._get_param("TemporaryPassword") + user = cognitoidp_backends[self.region].admin_create_user( + user_pool_id, + username, + temporary_password, + self._get_param("UserAttributes", []) + ) + + return json.dumps({ + "User": user.to_json(extended=True) + }) + + def admin_get_user(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + user = cognitoidp_backends[self.region].admin_get_user(user_pool_id, username) + return json.dumps( + user.to_json(extended=True, attributes_key="UserAttributes") + ) + + def list_users(self): + user_pool_id = self._get_param("UserPoolId") + limit = self._get_param("Limit") + token = self._get_param("PaginationToken") + users, token = cognitoidp_backends[self.region].list_users(user_pool_id, + limit=limit, + pagination_token=token) + response = {"Users": [user.to_json(extended=True) for user in users]} + if token: + response["PaginationToken"] = str(token) + return json.dumps(response) + + def admin_disable_user(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + cognitoidp_backends[self.region].admin_disable_user(user_pool_id, username) + return "" + + def admin_enable_user(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + cognitoidp_backends[self.region].admin_enable_user(user_pool_id, username) + return "" + + def admin_delete_user(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + cognitoidp_backends[self.region].admin_delete_user(user_pool_id, username) + return "" + + def admin_initiate_auth(self): + user_pool_id = self._get_param("UserPoolId") + client_id = self._get_param("ClientId") + auth_flow = self._get_param("AuthFlow") + auth_parameters = self._get_param("AuthParameters") + + auth_result = cognitoidp_backends[self.region].admin_initiate_auth( + user_pool_id, + client_id, + auth_flow, + auth_parameters, + ) + + return json.dumps(auth_result) + + def respond_to_auth_challenge(self): + session = self._get_param("Session") + client_id = self._get_param("ClientId") + challenge_name = self._get_param("ChallengeName") + challenge_responses = self._get_param("ChallengeResponses") + auth_result = cognitoidp_backends[self.region].respond_to_auth_challenge( + session, + client_id, + challenge_name, + challenge_responses, + ) + + return json.dumps(auth_result) + + def forgot_password(self): + return json.dumps({ + "CodeDeliveryDetails": { + "DeliveryMedium": "EMAIL", + "Destination": "...", + } + }) + + # This endpoint receives no authorization header, so if moto-server is listening + # on localhost (doesn't get a region in the host header), it doesn't know what + # region's backend should handle the traffic, and we use `find_region_by_value` to + # solve that problem. + def confirm_forgot_password(self): + client_id = self._get_param("ClientId") + username = self._get_param("Username") + password = self._get_param("Password") + region = find_region_by_value("client_id", client_id) + cognitoidp_backends[region].confirm_forgot_password(client_id, username, password) + return "" + + # Ditto the comment on confirm_forgot_password. 
+ def change_password(self): + access_token = self._get_param("AccessToken") + previous_password = self._get_param("PreviousPassword") + proposed_password = self._get_param("ProposedPassword") + region = find_region_by_value("access_token", access_token) + cognitoidp_backends[region].change_password(access_token, previous_password, proposed_password) + return "" + + def admin_update_user_attributes(self): + user_pool_id = self._get_param("UserPoolId") + username = self._get_param("Username") + attributes = self._get_param("UserAttributes") + cognitoidp_backends[self.region].admin_update_user_attributes(user_pool_id, username, attributes) + return "" + + +class CognitoIdpJsonWebKeyResponse(BaseResponse): + + def __init__(self): + with open(os.path.join(os.path.dirname(__file__), "resources/jwks-public.json")) as f: + self.json_web_key = f.read() + + def serve_json_web_key(self, request, full_url, headers): + return 200, {"Content-Type": "application/json"}, self.json_web_key diff --git a/moto/cognitoidp/urls.py b/moto/cognitoidp/urls.py new file mode 100644 index 000000000..77441ed5e --- /dev/null +++ b/moto/cognitoidp/urls.py @@ -0,0 +1,11 @@ +from __future__ import unicode_literals +from .responses import CognitoIdpResponse, CognitoIdpJsonWebKeyResponse + +url_bases = [ + "https?://cognito-idp.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': CognitoIdpResponse.dispatch, + '{0}/<user_pool_id>/.well-known/jwks.json$': CognitoIdpJsonWebKeyResponse().serve_json_web_key, +}
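Because the patch ships the signing key alongside the public JWKS document served at /<user_pool_id>/.well-known/jwks.json, tokens minted by the mock can be verified offline. A small sketch using python-jose, which this module already depends on; the resource paths are the ones added above:

    import json
    import pkgutil

    from jose import jws, jwt

    private_key = json.loads(pkgutil.get_data("moto.cognitoidp", "resources/jwks-private.json"))
    public_key = json.loads(pkgutil.get_data("moto.cognitoidp", "resources/jwks-public.json"))["keys"][0]

    # Sign with the private JWK, as CognitoIdpUserPool.create_jwt does, then
    # verify against the public JWK the mock serves.
    token = jws.sign({"sub": "example-user"}, private_key, algorithm="RS256")
    claims = jwt.decode(token, public_key, algorithms=["RS256"])
    assert claims["sub"] == "example-user"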
diff --git a/moto/config/__init__.py b/moto/config/__init__.py new file mode 100644 index 000000000..9ca6a5917 --- /dev/null +++ b/moto/config/__init__.py @@ -0,0 +1,4 @@ +from .models import config_backends +from ..core.models import base_decorator + +mock_config = base_decorator(config_backends) diff --git a/moto/config/exceptions.py b/moto/config/exceptions.py new file mode 100644 index 000000000..b2b01d6a0 --- /dev/null +++ b/moto/config/exceptions.py @@ -0,0 +1,149 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class NameTooLongException(JsonRESTError): + code = 400 + + def __init__(self, name, location): + message = '1 validation error detected: Value \'{name}\' at \'{location}\' failed to satisfy' \ + ' constraint: Member must have length less than or equal to 256'.format(name=name, location=location) + super(NameTooLongException, self).__init__("ValidationException", message) + + +class InvalidConfigurationRecorderNameException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'The configuration recorder name \'{name}\' is not valid, blank string.'.format(name=name) + super(InvalidConfigurationRecorderNameException, self).__init__("InvalidConfigurationRecorderNameException", + message) + + +class MaxNumberOfConfigurationRecordersExceededException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'Failed to put configuration recorder \'{name}\' because the maximum number of ' \ + 'configuration recorders: 1 is reached.'.format(name=name) + super(MaxNumberOfConfigurationRecordersExceededException, self).__init__( + "MaxNumberOfConfigurationRecordersExceededException", message) + + +class InvalidRecordingGroupException(JsonRESTError): + code = 400 + + def __init__(self): + message = 'The recording group provided is not valid' + super(InvalidRecordingGroupException, self).__init__("InvalidRecordingGroupException", message) + + +class InvalidResourceTypeException(JsonRESTError): + code = 400 + + def __init__(self, bad_list, good_list): + message = '{num} validation error detected: Value \'{bad_list}\' at ' \ + '\'configurationRecorder.recordingGroup.resourceTypes\' failed to satisfy constraint: ' \ + 'Member must satisfy constraint: [Member must satisfy enum value set: {good_list}]'.format( + num=len(bad_list), bad_list=bad_list, good_list=good_list) + # For PY2: + message = str(message) + + super(InvalidResourceTypeException, self).__init__("ValidationException", message) + + +class NoSuchConfigurationRecorderException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'Cannot find configuration recorder with the specified name \'{name}\'.'.format(name=name) + super(NoSuchConfigurationRecorderException, self).__init__("NoSuchConfigurationRecorderException", message) + + +class InvalidDeliveryChannelNameException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'The delivery channel name \'{name}\' is not valid, blank string.'.format(name=name) + super(InvalidDeliveryChannelNameException, self).__init__("InvalidDeliveryChannelNameException", + message) + + +class NoSuchBucketException(JsonRESTError): + """We are *only* validating that there is a value that is not '' here.""" + code = 400 + + def __init__(self): + message = 'Cannot find a S3 bucket with an empty bucket name.' + super(NoSuchBucketException, self).__init__("NoSuchBucketException", message) + + +class InvalidS3KeyPrefixException(JsonRESTError): + code = 400 + + def __init__(self): + message = 'The s3 key prefix \'\' is not valid, empty s3 key prefix.' + super(InvalidS3KeyPrefixException, self).__init__("InvalidS3KeyPrefixException", message) + + +class InvalidSNSTopicARNException(JsonRESTError): + """We are *only* validating that there is a value that is not '' here.""" + code = 400 + + def __init__(self): + message = 'The sns topic arn \'\' is not valid.' + super(InvalidSNSTopicARNException, self).__init__("InvalidSNSTopicARNException", message) + + +class InvalidDeliveryFrequency(JsonRESTError): + code = 400 + + def __init__(self, value, good_list): + message = '1 validation error detected: Value \'{value}\' at ' \ + '\'deliveryChannel.configSnapshotDeliveryProperties.deliveryFrequency\' failed to satisfy ' \ + 'constraint: Member must satisfy enum value set: {good_list}'.format(value=value, good_list=good_list) + super(InvalidDeliveryFrequency, self).__init__("InvalidDeliveryFrequency", message) + + +class MaxNumberOfDeliveryChannelsExceededException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'Failed to put delivery channel \'{name}\' because the maximum number of ' \ + 'delivery channels: 1 is reached.'.format(name=name) + super(MaxNumberOfDeliveryChannelsExceededException, self).__init__( + "MaxNumberOfDeliveryChannelsExceededException", message) + + +class NoSuchDeliveryChannelException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'Cannot find delivery channel with specified name \'{name}\'.'.format(name=name) + super(NoSuchDeliveryChannelException, self).__init__("NoSuchDeliveryChannelException", message) + + +class NoAvailableConfigurationRecorderException(JsonRESTError): + code = 400 + + def __init__(self): + message = 'Configuration recorder is not available to put delivery channel.'
+ super(NoAvailableConfigurationRecorderException, self).__init__("NoAvailableConfigurationRecorderException", + message) + + +class NoAvailableDeliveryChannelException(JsonRESTError): + code = 400 + + def __init__(self): + message = 'Delivery channel is not available to start configuration recorder.' + super(NoAvailableDeliveryChannelException, self).__init__("NoAvailableDeliveryChannelException", message) + + +class LastDeliveryChannelDeleteFailedException(JsonRESTError): + code = 400 + + def __init__(self, name): + message = 'Failed to delete last specified delivery channel with name \'{name}\', ' \ + 'because there is a running configuration recorder.'.format(name=name) + super(LastDeliveryChannelDeleteFailedException, self).__init__("LastDeliveryChannelDeleteFailedException", message) diff --git a/moto/config/models.py b/moto/config/models.py new file mode 100644 index 000000000..cd6e07afa --- /dev/null +++ b/moto/config/models.py @@ -0,0 +1,335 @@ +import json +import time +import pkg_resources + +from datetime import datetime + +from boto3 import Session + +from moto.config.exceptions import InvalidResourceTypeException, InvalidDeliveryFrequency, \ + InvalidConfigurationRecorderNameException, NameTooLongException, \ + MaxNumberOfConfigurationRecordersExceededException, InvalidRecordingGroupException, \ + NoSuchConfigurationRecorderException, NoAvailableConfigurationRecorderException, \ + InvalidDeliveryChannelNameException, NoSuchBucketException, InvalidS3KeyPrefixException, \ + InvalidSNSTopicARNException, MaxNumberOfDeliveryChannelsExceededException, NoAvailableDeliveryChannelException, \ + NoSuchDeliveryChannelException, LastDeliveryChannelDeleteFailedException + +from moto.core import BaseBackend, BaseModel + +DEFAULT_ACCOUNT_ID = 123456789012 + + +def datetime2int(date): + return int(time.mktime(date.timetuple())) + + +def snake_to_camels(original): + parts = original.split('_') + + camel_cased = parts[0].lower() + ''.join(p.title() for p in parts[1:]) + camel_cased = camel_cased.replace('Arn', 'ARN') # Config uses 'ARN' instead of 'Arn' + + return camel_cased
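snake_to_camels is what lets the models below keep pythonic attribute names while serializing to Config's camelCase wire format, with 'ARN' special-cased. A quick sanity sketch, assuming the module path introduced by this patch:

    from moto.config.models import snake_to_camels

    assert snake_to_camels("all_supported") == "allSupported"
    assert snake_to_camels("s3_bucket_name") == "s3BucketName"
    assert snake_to_camels("sns_topic_arn") == "snsTopicARN"  # 'Arn' -> 'ARN'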
+ + +class ConfigEmptyDictable(BaseModel): + """Base class to make serialization easy. This assumes that the sub-class will NOT return 'None's in the JSON.""" + + def to_dict(self): + data = {} + for item, value in self.__dict__.items(): + if value is not None: + if isinstance(value, ConfigEmptyDictable): + data[snake_to_camels(item)] = value.to_dict() + else: + data[snake_to_camels(item)] = value + + return data + + +class ConfigRecorderStatus(ConfigEmptyDictable): + + def __init__(self, name): + self.name = name + + self.recording = False + self.last_start_time = None + self.last_stop_time = None + self.last_status = None + self.last_error_code = None + self.last_error_message = None + self.last_status_change_time = None + + def start(self): + self.recording = True + self.last_status = 'PENDING' + self.last_start_time = datetime2int(datetime.utcnow()) + self.last_status_change_time = datetime2int(datetime.utcnow()) + + def stop(self): + self.recording = False + self.last_stop_time = datetime2int(datetime.utcnow()) + self.last_status_change_time = datetime2int(datetime.utcnow()) + + +class ConfigDeliverySnapshotProperties(ConfigEmptyDictable): + + def __init__(self, delivery_frequency): + self.delivery_frequency = delivery_frequency + + +class ConfigDeliveryChannel(ConfigEmptyDictable): + + def __init__(self, name, s3_bucket_name, prefix=None, sns_arn=None, snapshot_properties=None): + self.name = name + self.s3_bucket_name = s3_bucket_name + self.s3_key_prefix = prefix + self.sns_topic_arn = sns_arn + self.config_snapshot_delivery_properties = snapshot_properties + + +class RecordingGroup(ConfigEmptyDictable): + + def __init__(self, all_supported=True, include_global_resource_types=False, resource_types=None): + self.all_supported = all_supported + self.include_global_resource_types = include_global_resource_types + self.resource_types = resource_types + + +class ConfigRecorder(ConfigEmptyDictable): + + def __init__(self, role_arn, recording_group, name='default', status=None): + self.name = name + self.role_arn = role_arn + self.recording_group = recording_group + + if not status: + self.status = ConfigRecorderStatus(name) + else: + self.status = status + + +class ConfigBackend(BaseBackend): + + def __init__(self): + self.recorders = {} + self.delivery_channels = {} + + @staticmethod + def _validate_resource_types(resource_list): + # Load the service file: + resource_package = 'botocore' + resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json')) + config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path)) + + # Verify that each entry exists in the supported list: + bad_list = [] + for resource in resource_list: + # For PY2: + r_str = str(resource) + + if r_str not in config_schema['shapes']['ResourceType']['enum']: + bad_list.append(r_str) + + if bad_list: + raise InvalidResourceTypeException(bad_list, config_schema['shapes']['ResourceType']['enum']) + + @staticmethod + def _validate_delivery_snapshot_properties(properties): + # Load the service file: + resource_package = 'botocore' + resource_path = '/'.join(('data', 'config', '2014-11-12', 'service-2.json')) + config_schema = json.loads(pkg_resources.resource_string(resource_package, resource_path)) + + # Verify that the deliveryFrequency is set to an acceptable value: + if properties.get('deliveryFrequency', None) not in \ + config_schema['shapes']['MaximumExecutionFrequency']['enum']: + raise InvalidDeliveryFrequency(properties.get('deliveryFrequency', None), + config_schema['shapes']['MaximumExecutionFrequency']['enum']) + + def put_configuration_recorder(self,
config_recorder): + # Validate the name: + if not config_recorder.get('name'): + raise InvalidConfigurationRecorderNameException(config_recorder.get('name')) + if len(config_recorder.get('name')) > 256: + raise NameTooLongException(config_recorder.get('name'), 'configurationRecorder.name') + + # We're going to assume that the passed in Role ARN is correct. + + # Config currently only allows 1 configuration recorder for an account: + if len(self.recorders) == 1 and not self.recorders.get(config_recorder['name']): + raise MaxNumberOfConfigurationRecordersExceededException(config_recorder['name']) + + # Is this updating an existing one? + recorder_status = None + if self.recorders.get(config_recorder['name']): + recorder_status = self.recorders[config_recorder['name']].status + + # Validate the Recording Group: + if config_recorder.get('recordingGroup') is None: + recording_group = RecordingGroup() + else: + rg = config_recorder['recordingGroup'] + + # If an empty dict is passed in, then bad: + if not rg: + raise InvalidRecordingGroupException() + + # Can't have both the resource types specified and the other flags as True. + if rg.get('resourceTypes') and ( + rg.get('allSupported', False) or + rg.get('includeGlobalResourceTypes', False)): + raise InvalidRecordingGroupException() + + # Must supply resourceTypes if 'allSupported' is not supplied: + if not rg.get('allSupported') and not rg.get('resourceTypes'): + raise InvalidRecordingGroupException() + + # Validate that the list provided is correct: + self._validate_resource_types(rg.get('resourceTypes', [])) + + recording_group = RecordingGroup( + all_supported=rg.get('allSupported', True), + include_global_resource_types=rg.get('includeGlobalResourceTypes', False), + resource_types=rg.get('resourceTypes', []) + ) + + self.recorders[config_recorder['name']] = \ + ConfigRecorder(config_recorder['roleARN'], recording_group, name=config_recorder['name'], + status=recorder_status) + + def describe_configuration_recorders(self, recorder_names): + recorders = [] + + if recorder_names: + for rn in recorder_names: + if not self.recorders.get(rn): + raise NoSuchConfigurationRecorderException(rn) + + # Format the recorder: + recorders.append(self.recorders[rn].to_dict()) + + else: + for recorder in self.recorders.values(): + recorders.append(recorder.to_dict()) + + return recorders + + def describe_configuration_recorder_status(self, recorder_names): + recorders = [] + + if recorder_names: + for rn in recorder_names: + if not self.recorders.get(rn): + raise NoSuchConfigurationRecorderException(rn) + + # Format the recorder: + recorders.append(self.recorders[rn].status.to_dict()) + + else: + for recorder in self.recorders.values(): + recorders.append(recorder.status.to_dict()) + + return recorders + + def put_delivery_channel(self, delivery_channel): + # Must have a configuration recorder: + if not self.recorders: + raise NoAvailableConfigurationRecorderException() + + # Validate the name: + if not delivery_channel.get('name'): + raise InvalidDeliveryChannelNameException(delivery_channel.get('name')) + if len(delivery_channel.get('name')) > 256: + raise NameTooLongException(delivery_channel.get('name'), 'deliveryChannel.name') + + # We are going to assume that the bucket exists -- but will verify if the bucket provided is blank: + if not delivery_channel.get('s3BucketName'): + raise NoSuchBucketException() + + # We are going to assume that the bucket has the correct policy attached to it. 
We are only going to verify + # if the prefix provided is not an empty string: + if delivery_channel.get('s3KeyPrefix', None) == '': + raise InvalidS3KeyPrefixException() + + # Ditto for SNS -- Only going to assume that the ARN provided is not an empty string: + if delivery_channel.get('snsTopicARN', None) == '': + raise InvalidSNSTopicARNException() + + # Config currently only allows 1 delivery channel for an account: + if len(self.delivery_channels) == 1 and not self.delivery_channels.get(delivery_channel['name']): + raise MaxNumberOfDeliveryChannelsExceededException(delivery_channel['name']) + + if not delivery_channel.get('configSnapshotDeliveryProperties'): + dp = None + + else: + # Validate the config snapshot delivery properties: + self._validate_delivery_snapshot_properties(delivery_channel['configSnapshotDeliveryProperties']) + + dp = ConfigDeliverySnapshotProperties( + delivery_channel['configSnapshotDeliveryProperties']['deliveryFrequency']) + + self.delivery_channels[delivery_channel['name']] = \ + ConfigDeliveryChannel(delivery_channel['name'], delivery_channel['s3BucketName'], + prefix=delivery_channel.get('s3KeyPrefix', None), + sns_arn=delivery_channel.get('snsTopicARN', None), + snapshot_properties=dp) + + def describe_delivery_channels(self, channel_names): + channels = [] + + if channel_names: + for cn in channel_names: + if not self.delivery_channels.get(cn): + raise NoSuchDeliveryChannelException(cn) + + # Format the delivery channel: + channels.append(self.delivery_channels[cn].to_dict()) + + else: + for channel in self.delivery_channels.values(): + channels.append(channel.to_dict()) + + return channels + + def start_configuration_recorder(self, recorder_name): + if not self.recorders.get(recorder_name): + raise NoSuchConfigurationRecorderException(recorder_name) + + # Must have a delivery channel available as well: + if not self.delivery_channels: + raise NoAvailableDeliveryChannelException() + + # Start recording: + self.recorders[recorder_name].status.start() + + def stop_configuration_recorder(self, recorder_name): + if not self.recorders.get(recorder_name): + raise NoSuchConfigurationRecorderException(recorder_name) + + # Stop recording: + self.recorders[recorder_name].status.stop() + + def delete_configuration_recorder(self, recorder_name): + if not self.recorders.get(recorder_name): + raise NoSuchConfigurationRecorderException(recorder_name) + + del self.recorders[recorder_name] + + def delete_delivery_channel(self, channel_name): + if not self.delivery_channels.get(channel_name): + raise NoSuchDeliveryChannelException(channel_name) + + # Check if a channel is recording -- if so, bad -- (there can only be 1 recorder): + for recorder in self.recorders.values(): + if recorder.status.recording: + raise LastDeliveryChannelDeleteFailedException(channel_name) + + del self.delivery_channels[channel_name] + + +config_backends = {} +boto3_session = Session() +for region in boto3_session.get_available_regions('config'): + config_backends[region] = ConfigBackend() diff --git a/moto/config/responses.py b/moto/config/responses.py new file mode 100644 index 000000000..286b2349f --- /dev/null +++ b/moto/config/responses.py @@ -0,0 +1,53 @@ +import json +from moto.core.responses import BaseResponse +from .models import config_backends + + +class ConfigResponse(BaseResponse): + + @property + def config_backend(self): + return config_backends[self.region] + + def put_configuration_recorder(self): + 
self.config_backend.put_configuration_recorder(self._get_param('ConfigurationRecorder')) + return "" + + def describe_configuration_recorders(self): + recorders = self.config_backend.describe_configuration_recorders(self._get_param('ConfigurationRecorderNames')) + schema = {'ConfigurationRecorders': recorders} + return json.dumps(schema) + + def describe_configuration_recorder_status(self): + recorder_statuses = self.config_backend.describe_configuration_recorder_status( + self._get_param('ConfigurationRecorderNames')) + schema = {'ConfigurationRecordersStatus': recorder_statuses} + return json.dumps(schema) + + def put_delivery_channel(self): + self.config_backend.put_delivery_channel(self._get_param('DeliveryChannel')) + return "" + + def describe_delivery_channels(self): + delivery_channels = self.config_backend.describe_delivery_channels(self._get_param('DeliveryChannelNames')) + schema = {'DeliveryChannels': delivery_channels} + return json.dumps(schema) + + def describe_delivery_channel_status(self): + raise NotImplementedError() + + def delete_delivery_channel(self): + self.config_backend.delete_delivery_channel(self._get_param('DeliveryChannelName')) + return "" + + def delete_configuration_recorder(self): + self.config_backend.delete_configuration_recorder(self._get_param('ConfigurationRecorderName')) + return "" + + def start_configuration_recorder(self): + self.config_backend.start_configuration_recorder(self._get_param('ConfigurationRecorderName')) + return "" + + def stop_configuration_recorder(self): + self.config_backend.stop_configuration_recorder(self._get_param('ConfigurationRecorderName')) + return "" diff --git a/moto/config/urls.py b/moto/config/urls.py new file mode 100644 index 000000000..fd7b6969f --- /dev/null +++ b/moto/config/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import ConfigResponse + +url_bases = [ + "https?://config.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': ConfigResponse.dispatch, +} diff --git a/moto/core/models.py b/moto/core/models.py index c6fb72ffa..896f9ac4a 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -2,14 +2,19 @@ from __future__ import unicode_literals from __future__ import absolute_import -from collections import defaultdict import functools import inspect +import os import re import six +from io import BytesIO +from collections import defaultdict +from botocore.handlers import BUILTIN_HANDLERS +from botocore.awsrequest import AWSResponse +import mock from moto import settings -from moto.packages.responses import responses +import responses from moto.packages.httpretty import HTTPretty from .utils import ( convert_httpretty_response, @@ -33,6 +38,10 @@ class BaseMockAWS(object): self.backends_for_urls.update(self.backends) self.backends_for_urls.update(default_backends) + # "Mock" the AWS credentials as they can't be mocked in Botocore currently + FAKE_KEYS = {"AWS_ACCESS_KEY_ID": "foobar_key", "AWS_SECRET_ACCESS_KEY": "foobar_secret"} + self.env_variables_mocks = mock.patch.dict(os.environ, FAKE_KEYS) + if self.__class__.nested_count == 0: self.reset() @@ -43,11 +52,14 @@ class BaseMockAWS(object): def __enter__(self): self.start() + return self def __exit__(self, *args): self.stop() def start(self, reset=True): + self.env_variables_mocks.start() + self.__class__.nested_count += 1 if reset: for backend in self.backends.values(): @@ -56,6 +68,7 @@ class BaseMockAWS(object): self.enable_patching() def stop(self): + self.env_variables_mocks.stop() self.__class__.nested_count -= 
1 if self.__class__.nested_count < 0: @@ -89,6 +102,17 @@ class BaseMockAWS(object): if inspect.ismethod(attr_value) and attr_value.__self__ is klass: continue + # Check if this is a staticmethod. If so, skip patching + for cls in inspect.getmro(klass): + if attr_value.__name__ not in cls.__dict__: + continue + bound_attr_value = cls.__dict__[attr_value.__name__] + if not isinstance(bound_attr_value, staticmethod): + break + else: + # It is a staticmethod, skip patching + continue + try: setattr(klass, attr, self(attr_value, reset=False)) except TypeError: @@ -124,34 +148,209 @@ RESPONSES_METHODS = [responses.GET, responses.DELETE, responses.HEAD, responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT] -class ResponsesMockAWS(BaseMockAWS): +class CallbackResponse(responses.CallbackResponse): + ''' + Need to subclass so we can change a couple things + ''' + def get_response(self, request): + ''' + Need to override this so we can pass decode_content=False + ''' + headers = self.get_headers() + result = self.callback(request) + if isinstance(result, Exception): + raise result + + status, r_headers, body = result + body = responses._handle_body(body) + headers.update(r_headers) + + return responses.HTTPResponse( + status=status, + reason=six.moves.http_client.responses.get(status), + body=body, + headers=headers, + preload_content=False, + # Need to not decode_content to mimic requests + decode_content=False, + ) + + def _url_matches(self, url, other, match_querystring=False): + ''' + Need to override this so we can fix querystrings breaking regex matching + ''' + if not match_querystring: + other = other.split('?', 1)[0] + + if responses._is_string(url): + if responses._has_unicode(url): + url = responses._clean_unicode(url) + if not isinstance(other, six.text_type): + other = other.encode('ascii').decode('utf8') + return self._url_matches_strict(url, other) + elif isinstance(url, responses.Pattern) and url.match(other): + return True + else: + return False + + +botocore_mock = responses.RequestsMock(assert_all_requests_are_fired=False, target='botocore.vendored.requests.adapters.HTTPAdapter.send') +responses_mock = responses._default_mock + + +class ResponsesMockAWS(BaseMockAWS): def reset(self): - responses.reset() + botocore_mock.reset() + responses_mock.reset() def enable_patching(self): - responses.start() + if not hasattr(botocore_mock, '_patcher') or not hasattr(botocore_mock._patcher, 'target'): + # Check for unactivated patcher + botocore_mock.start() + + if not hasattr(responses_mock, '_patcher') or not hasattr(responses_mock._patcher, 'target'): + responses_mock.start() + for method in RESPONSES_METHODS: for backend in self.backends_for_urls.values(): for key, value in backend.urls.items(): - responses.add_callback( - method=method, - url=re.compile(key), - callback=convert_flask_to_responses_response(value), + responses_mock.add( + CallbackResponse( + method=method, + url=re.compile(key), + callback=convert_flask_to_responses_response(value), + stream=True, + match_querystring=False, + ) + ) + botocore_mock.add( + CallbackResponse( + method=method, + url=re.compile(key), + callback=convert_flask_to_responses_response(value), + stream=True, + match_querystring=False, + ) ) - - for pattern in responses.mock._urls: - pattern['stream'] = True def disable_patching(self): try: - responses.stop() - except AttributeError: + botocore_mock.stop() + except RuntimeError: + pass + + try: + responses_mock.stop() + except RuntimeError: pass - responses.reset() -MockAWS = 
ResponsesMockAWS +BOTOCORE_HTTP_METHODS = [ + 'GET', 'DELETE', 'HEAD', 'OPTIONS', 'PATCH', 'POST', 'PUT' +] + + +class MockRawResponse(BytesIO): + def __init__(self, input): + if isinstance(input, six.text_type): + input = input.encode('utf-8') + super(MockRawResponse, self).__init__(input) + + def stream(self, **kwargs): + contents = self.read() + while contents: + yield contents + contents = self.read() + + +class BotocoreStubber(object): + def __init__(self): + self.enabled = False + self.methods = defaultdict(list) + + def reset(self): + self.methods.clear() + + def register_response(self, method, pattern, response): + matchers = self.methods[method] + matchers.append((pattern, response)) + + def __call__(self, event_name, request, **kwargs): + if not self.enabled: + return None + + response = None + response_callback = None + found_index = None + matchers = self.methods.get(request.method) + + base_url = request.url.split('?', 1)[0] + for i, (pattern, callback) in enumerate(matchers): + if pattern.match(base_url): + if found_index is None: + found_index = i + response_callback = callback + else: + matchers.pop(found_index) + break + + if response_callback is not None: + for header, value in request.headers.items(): + if isinstance(value, six.binary_type): + request.headers[header] = value.decode('utf-8') + status, headers, body = response_callback(request, request.url, request.headers) + body = MockRawResponse(body) + response = AWSResponse(request.url, status, headers, body) + + return response + + +botocore_stubber = BotocoreStubber() +BUILTIN_HANDLERS.append(('before-send', botocore_stubber)) + + +class BotocoreEventMockAWS(BaseMockAWS): + def reset(self): + botocore_stubber.reset() + responses_mock.reset() + + def enable_patching(self): + botocore_stubber.enabled = True + for method in BOTOCORE_HTTP_METHODS: + for backend in self.backends_for_urls.values(): + for key, value in backend.urls.items(): + pattern = re.compile(key) + botocore_stubber.register_response(method, pattern, value) + + if not hasattr(responses_mock, '_patcher') or not hasattr(responses_mock._patcher, 'target'): + responses_mock.start() + + for method in RESPONSES_METHODS: + # for backend in default_backends.values(): + for backend in self.backends_for_urls.values(): + for key, value in backend.urls.items(): + responses_mock.add( + CallbackResponse( + method=method, + url=re.compile(key), + callback=convert_flask_to_responses_response(value), + stream=True, + match_querystring=False, + ) + ) + + def disable_patching(self): + botocore_stubber.enabled = False + self.reset() + + try: + responses_mock.stop() + except RuntimeError: + pass + + +MockAWS = BotocoreEventMockAWS class ServerModeMockAWS(BaseMockAWS): @@ -270,10 +469,14 @@ class BaseModel(object): class BaseBackend(object): - def reset(self): + def _reset_model_refs(self): + # Remove all references to the models stored for service, models in model_data.items(): for model_name, model in models.items(): model.instances = [] + + def reset(self): + self._reset_model_refs() self.__dict__ = {} self.__init__() diff --git a/moto/core/responses.py b/moto/core/responses.py index be0a4ef45..9da36b865 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -5,6 +5,7 @@ import datetime import json import logging import re +import io import pytz from moto.core.exceptions import DryRunClientError @@ -106,7 +107,9 @@ class BaseResponse(_TemplateEnvironmentMixin): default_region = 'us-east-1' # to extract region, use [^.] 
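The key change above is the `before-send` hook: botocore fires it with the fully signed request, and if any handler returns an `AWSResponse` the real HTTP call is skipped. A minimal sketch of that mechanism outside moto (the canned body and per-session registration are illustrative assumptions):

    from io import BytesIO
    import boto3
    from botocore.awsrequest import AWSResponse

    class RawResponse(BytesIO):
        # AWSResponse reads its body through raw.stream(), which plain
        # BytesIO lacks -- the same gap MockRawResponse fills above.
        def stream(self, **kwargs):
            contents = self.read()
            while contents:
                yield contents
                contents = self.read()

    def canned_send(request, **kwargs):
        # Returning a response from 'before-send' short-circuits the network.
        return AWSResponse(request.url, 200, {}, RawResponse(b'{}'))

    session = boto3.Session()
    session.events.register('before-send', canned_send)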
- region_regex = r'\.([^.]+?)\.amazonaws\.com' + region_regex = re.compile(r'\.(?P[a-z]{2}-[a-z]+-\d{1})\.amazonaws\.com') + param_list_regex = re.compile(r'(.*)\.(\d+)\.') + access_key_regex = re.compile(r'AWS.*(?P(? len(param_prefix) and \ + not name[len(param_prefix):].startswith('.'): + continue + + match = self.param_list_regex.search(name[len(param_prefix):]) if len(name) > len(param_prefix) else None + if match: + prefix = param_prefix + match.group(1) + value = self._get_multi_param(prefix) + tracked_prefixes.add(prefix) + name = prefix + value_dict[name] = value + else: + value_dict[name] = value[0] + + if not value_dict: + return None + + if len(value_dict) > 1: + # strip off period prefix + value_dict = {name[len(param_prefix) + 1:]: value for name, value in value_dict.items()} + else: + value_dict = list(value_dict.values())[0] + + return value_dict + def _get_multi_param(self, param_prefix): """ Given a querystring of ?LaunchConfigurationNames.member.1=my-test-1&LaunchConfigurationNames.member.2=my-test-2 @@ -323,12 +390,13 @@ class BaseResponse(_TemplateEnvironmentMixin): values = [] index = 1 while True: - try: - values.append(self.querystring[prefix + str(index)][0]) - except KeyError: + value_dict = self._get_multi_param_helper(prefix + str(index)) + if not value_dict: break - else: - index += 1 + + values.append(value_dict) + index += 1 + return values def _get_dict_param(self, param_prefix): @@ -432,6 +500,54 @@ class BaseResponse(_TemplateEnvironmentMixin): return results + def _get_object_map(self, prefix, name='Name', value='Value'): + """ + Given a query dict like + { + Prefix.1.Name: [u'event'], + Prefix.1.Value.StringValue: [u'order_cancelled'], + Prefix.1.Value.DataType: [u'String'], + Prefix.2.Name: [u'store'], + Prefix.2.Value.StringValue: [u'example_corp'], + Prefix.2.Value.DataType [u'String'], + } + + returns + { + 'event': { + 'DataType': 'String', + 'StringValue': 'example_corp' + }, + 'store': { + 'DataType': 'String', + 'StringValue': 'order_cancelled' + } + } + """ + object_map = {} + index = 1 + while True: + # Loop through looking for keys representing object name + name_key = '{0}.{1}.{2}'.format(prefix, index, name) + obj_name = self.querystring.get(name_key) + if not obj_name: + # Found all keys + break + + obj = {} + value_key_prefix = '{0}.{1}.{2}.'.format( + prefix, index, value) + for k, v in self.querystring.items(): + if k.startswith(value_key_prefix): + _, value_key = k.split(value_key_prefix, 1) + obj[value_key] = v[0] + + object_map[obj_name[0]] = obj + + index += 1 + + return object_map + @property def request_json(self): return 'JSON' in self.querystring.get('ContentType', []) @@ -514,7 +630,7 @@ class AWSServiceSpec(object): def __init__(self, path): self.path = resource_filename('botocore', path) - with open(self.path) as f: + with io.open(self.path, 'r', encoding='utf-8') as f: spec = json.load(f) self.metadata = spec['metadata'] self.operations = spec['operations'] @@ -609,6 +725,8 @@ def to_str(value, spec): return str(value) elif vtype == 'float': return str(value) + elif vtype == 'double': + return str(value) elif vtype == 'timestamp': return datetime.datetime.utcfromtimestamp( value).replace(tzinfo=pytz.utc).isoformat() @@ -628,6 +746,8 @@ def from_str(value, spec): return int(value) elif vtype == 'float': return float(value) + elif vtype == 'double': + return float(value) elif vtype == 'timestamp': return value elif vtype == 'string': diff --git a/moto/core/utils.py b/moto/core/utils.py index 43f05672e..ca670e871 100644 --- 
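For context, the `_get_multi_param` rework above lets numbered querystring members be whole structures instead of bare scalars; an illustrative input/output pair (the parameter names are hypothetical):

    querystring = {
        'Tags.member.1.Key': ['Name'],
        'Tags.member.1.Value': ['web'],
        'Tags.member.2.Key': ['Env'],
        'Tags.member.2.Value': ['prod'],
    }
    # _get_multi_param('Tags.member') would now assemble:
    #   [{'Key': 'Name', 'Value': 'web'}, {'Key': 'Env', 'Value': 'prod'}]
    # where the old loop could only collect flat values like
    #   Tags.member.1=web&Tags.member.2=prod -> ['web', 'prod']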
a/moto/core/utils.py +++ b/moto/core/utils.py @@ -8,6 +8,7 @@ import random import re import six import string +from six.moves.urllib.parse import urlparse REQUEST_ID_LONG = string.digits + string.ascii_uppercase @@ -18,6 +19,8 @@ def camelcase_to_underscores(argument): python underscore variable like the_new_attribute''' result = '' prev_char_title = True + if not argument: + return argument for index, char in enumerate(argument): try: next_char_title = argument[index + 1].istitle() @@ -277,10 +280,20 @@ def amzn_request_id(f): # Update request ID in XML try: - body = body.replace('{{ requestid }}', request_id) + body = re.sub(r'(?<=<RequestId>).*(?=<\/RequestId>)', request_id, body) except Exception: # Will just ignore if it can't work on bytes (which are str's on python2) pass return status, headers, body return _wrapper + + +def path_url(url): + parsed_url = urlparse(url) + path = parsed_url.path + if not path: + path = '/' + if parsed_url.query: + path = path + '?' + parsed_url.query + return path diff --git a/moto/dynamodb/responses.py b/moto/dynamodb/responses.py index d4f832be2..990069a46 100644 --- a/moto/dynamodb/responses.py +++ b/moto/dynamodb/responses.py @@ -62,13 +62,13 @@ class DynamoHandler(BaseResponse): name = body['TableName'] key_schema = body['KeySchema'] - hash_hey = key_schema['HashKeyElement'] - hash_key_attr = hash_hey['AttributeName'] - hash_key_type = hash_hey['AttributeType'] + hash_key = key_schema['HashKeyElement'] + hash_key_attr = hash_key['AttributeName'] + hash_key_type = hash_key['AttributeType'] - range_hey = key_schema.get('RangeKeyElement', {}) - range_key_attr = range_hey.get('AttributeName') - range_key_type = range_hey.get('AttributeType') + range_key = key_schema.get('RangeKeyElement', {}) + range_key_attr = range_key.get('AttributeName') + range_key_type = range_key.get('AttributeType') throughput = body["ProvisionedThroughput"] read_units = throughput["ReadCapacityUnits"] diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 68051460e..151a314f1 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -1,6 +1,94 @@ from __future__ import unicode_literals import re import six +import re +from collections import deque +from collections import namedtuple + + +def get_filter_expression(expr, names, values): + """ + Parse a filter expression into an Op. + + Examples + expr = 'Id > 5 AND attribute_exists(test) AND Id BETWEEN 5 AND 6 OR length < 6 AND contains(test, 1) AND 5 IN (4,5, 6) OR (Id < 5 AND 5 > Id)' + expr = 'Id > 5 AND Subs < 7' + """ + parser = ConditionExpressionParser(expr, names, values) + return parser.parse() + + +def get_expected(expected): + """ + Parse a filter expression into an Op.
+ + Examples + expr = 'Id > 5 AND attribute_exists(test) AND Id BETWEEN 5 AND 6 OR length < 6 AND contains(test, 1) AND 5 IN (4,5, 6) OR (Id < 5 AND 5 > Id)' + expr = 'Id > 5 AND Subs < 7' + """ + ops = { + 'EQ': OpEqual, + 'NE': OpNotEqual, + 'LE': OpLessThanOrEqual, + 'LT': OpLessThan, + 'GE': OpGreaterThanOrEqual, + 'GT': OpGreaterThan, + 'NOT_NULL': FuncAttrExists, + 'NULL': FuncAttrNotExists, + 'CONTAINS': FuncContains, + 'NOT_CONTAINS': FuncNotContains, + 'BEGINS_WITH': FuncBeginsWith, + 'IN': FuncIn, + 'BETWEEN': FuncBetween, + } + + # NOTE: Always uses ConditionalOperator=AND + conditions = [] + for key, cond in expected.items(): + path = AttributePath([key]) + if 'Exists' in cond: + if cond['Exists']: + conditions.append(FuncAttrExists(path)) + else: + conditions.append(FuncAttrNotExists(path)) + elif 'Value' in cond: + conditions.append(OpEqual(path, AttributeValue(cond['Value']))) + elif 'ComparisonOperator' in cond: + operator_name = cond['ComparisonOperator'] + values = [ + AttributeValue(v) + for v in cond.get("AttributeValueList", [])] + OpClass = ops[operator_name] + conditions.append(OpClass(path, *values)) + + # NOTE: Ignore ConditionalOperator + ConditionalOp = OpAnd + if conditions: + output = conditions[0] + for condition in conditions[1:]: + output = ConditionalOp(output, condition) + else: + return OpDefault(None, None) + + return output + + +class Op(object): + """ + Base class for a FilterExpression operator + """ + OP = '' + + def __init__(self, lhs, rhs): + self.lhs = lhs + self.rhs = rhs + + def expr(self, item): + raise NotImplementedError("Expr not defined for {0}".format(type(self))) + + def __repr__(self): + return '({0} {1} {2})'.format(self.lhs, self.OP, self.rhs) + # TODO add tests for all of these EQ_FUNCTION = lambda item_value, test_value: item_value == test_value # flake8: noqa @@ -29,8 +117,10 @@ COMPARISON_FUNCS = { 'GT': GT_FUNCTION, '>': GT_FUNCTION, - 'NULL': lambda item_value: item_value is None, - 'NOT_NULL': lambda item_value: item_value is not None, + # NULL means the value should not exist at all + 'NULL': lambda item_value: False, + # NOT_NULL means the value merely has to exist, and values of None are valid + 'NOT_NULL': lambda item_value: True, 'CONTAINS': lambda item_value, test_value: test_value in item_value, 'NOT_CONTAINS': lambda item_value, test_value: test_value not in item_value, 'BEGINS_WITH': lambda item_value, test_value: item_value.startswith(test_value), @@ -47,290 +137,799 @@ class RecursionStopIteration(StopIteration): pass -def get_filter_expression(expr, names, values): - # Examples - # expr = 'Id > 5 AND attribute_exists(test) AND Id BETWEEN 5 AND 6 OR length < 6 AND contains(test, 1) AND 5 IN (4,5, 6) OR (Id < 5 AND 5 > Id)' - # expr = 'Id > 5 AND Subs < 7' - if names is None: - names = {} - if values is None: - values = {} +class ConditionExpressionParser: + def __init__(self, condition_expression, expression_attribute_names, + expression_attribute_values): + self.condition_expression = condition_expression + self.expression_attribute_names = expression_attribute_names + self.expression_attribute_values = expression_attribute_values - # Do substitutions - for key, value in names.items(): - expr = expr.replace(key, value) + def parse(self): + """Returns a syntax tree for the expression. 
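A worked example of the `get_expected` translation above, assuming a two-entry Expected map (the conjunction order follows dict iteration):

    expected = {
        'Id': {'ComparisonOperator': 'GT', 'AttributeValueList': [{'N': '5'}]},
        'Name': {'Exists': True},
    }
    op = get_expected(expected)
    # roughly: OpAnd(OpGreaterThan(AttributePath(['Id']), AttributeValue({'N': '5'})),
    #                FuncAttrExists(AttributePath(['Name'])))
    # op.expr(item) then evaluates the whole condition against an Item.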
- # Store correct types of values for use later - values_map = {} - for key, value in values.items(): - if 'N' in value: - values_map[key] = float(value['N']) - elif 'BOOL' in value: - values_map[key] = value['BOOL'] - elif 'S' in value: - values_map[key] = value['S'] - elif 'NS' in value: - values_map[key] = tuple(value['NS']) - elif 'SS' in value: - values_map[key] = tuple(value['SS']) - elif 'L' in value: - values_map[key] = tuple(value['L']) + The tree, and all of the nodes in the tree are a tuple of + - kind: str + - children/value: + list of nodes for parent nodes + value for leaf nodes + + Raises ValueError if the condition expression is invalid + Raises KeyError if expression attribute names/values are invalid + + Here are the types of nodes that can be returned. + The types of child nodes are denoted with a colon (:). + An arbitrary number of children is denoted with ... + + Condition: + ('OR', [lhs : Condition, rhs : Condition]) + ('AND', [lhs: Condition, rhs: Condition]) + ('NOT', [argument: Condition]) + ('PARENTHESES', [argument: Condition]) + ('FUNCTION', [('LITERAL', function_name: str), argument: Operand, ...]) + ('BETWEEN', [query: Operand, low: Operand, high: Operand]) + ('IN', [query: Operand, possible_value: Operand, ...]) + ('COMPARISON', [lhs: Operand, ('LITERAL', comparator: str), rhs: Operand]) + + Operand: + ('EXPRESSION_ATTRIBUTE_VALUE', value: dict, e.g. {'S': 'foobar'}) + ('PATH', [('LITERAL', path_element: str), ...]) + NOTE: Expression attribute names will be expanded + ('FUNCTION', [('LITERAL', 'size'), argument: Operand]) + + Literal: + ('LITERAL', value: str) + + """ + if not self.condition_expression: + return OpDefault(None, None) + nodes = self._lex_condition_expression() + nodes = self._parse_paths(nodes) + # NOTE: The docs say that functions should be parsed after + # IN, BETWEEN, and comparisons like <=. + # However, these expressions are invalid as function arguments, + # so it is okay to parse functions first. This needs to be done + # to interpret size() correctly as an operand. 
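+ # Illustrative pipeline trace (not from the source) for 'a < :v AND size(#n) = :s':
+ #   lex       -> a, <, :v, AND, size, (, #n, ), =, :s
+ #   paths     -> :v and #n resolved through the attribute value/name maps
+ #   functions -> size(#n) collapses into a single OPERAND node
+ #   compare   -> 'a < :v' and 'size(#n) = :s' become CONDITION nodes
+ #   booleans  -> AND joins the two conditions into the final tree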
+ nodes = self._apply_functions(nodes) + nodes = self._apply_comparator(nodes) + nodes = self._apply_in(nodes) + nodes = self._apply_between(nodes) + nodes = self._apply_parens_and_booleans(nodes) + node = nodes[0] + op = self._make_op_condition(node) + return op + + class Kind: + """Enum defining types of nodes in the syntax tree.""" + + # Condition nodes + # --------------- + OR = 'OR' + AND = 'AND' + NOT = 'NOT' + PARENTHESES = 'PARENTHESES' + FUNCTION = 'FUNCTION' + BETWEEN = 'BETWEEN' + IN = 'IN' + COMPARISON = 'COMPARISON' + + # Operand nodes + # ------------- + EXPRESSION_ATTRIBUTE_VALUE = 'EXPRESSION_ATTRIBUTE_VALUE' + PATH = 'PATH' + + # Literal nodes + # -------------- + LITERAL = 'LITERAL' + + + class Nonterminal: + """Enum defining nonterminals for productions.""" + + CONDITION = 'CONDITION' + OPERAND = 'OPERAND' + COMPARATOR = 'COMPARATOR' + FUNCTION_NAME = 'FUNCTION_NAME' + IDENTIFIER = 'IDENTIFIER' + AND = 'AND' + OR = 'OR' + NOT = 'NOT' + BETWEEN = 'BETWEEN' + IN = 'IN' + COMMA = 'COMMA' + LEFT_PAREN = 'LEFT_PAREN' + RIGHT_PAREN = 'RIGHT_PAREN' + WHITESPACE = 'WHITESPACE' + + + Node = namedtuple('Node', ['nonterminal', 'kind', 'text', 'value', 'children']) + + def _lex_condition_expression(self): + nodes = deque() + remaining_expression = self.condition_expression + while remaining_expression: + node, remaining_expression = \ + self._lex_one_node(remaining_expression) + if node.nonterminal == self.Nonterminal.WHITESPACE: + continue + nodes.append(node) + return nodes + + def _lex_one_node(self, remaining_expression): + # TODO: Handle indexing like [1] + attribute_regex = '(:|#)?[A-z0-9\-_]+' + patterns = [( + self.Nonterminal.WHITESPACE, re.compile('^ +') + ), ( + self.Nonterminal.COMPARATOR, re.compile( + '^(' + # Put long expressions first for greedy matching + '<>|' + '<=|' + '>=|' + '=|' + '<|' + '>)'), + ), ( + self.Nonterminal.OPERAND, re.compile( + '^' + + attribute_regex + '(\.' 
+ attribute_regex + '|\[[0-9]\])*') + ), ( + self.Nonterminal.COMMA, re.compile('^,') + ), ( + self.Nonterminal.LEFT_PAREN, re.compile('^\(') + ), ( + self.Nonterminal.RIGHT_PAREN, re.compile('^\)') + )] + + for nonterminal, pattern in patterns: + match = pattern.match(remaining_expression) + if match: + match_text = match.group() + break + else: # pragma: no cover + raise ValueError("Cannot parse condition starting at: " + + remaining_expression) + + value = match_text + node = self.Node( + nonterminal=nonterminal, + kind=self.Kind.LITERAL, + text=match_text, + value=match_text, + children=[]) + + remaining_expression = remaining_expression[len(match_text):] + + return node, remaining_expression + + def _parse_paths(self, nodes): + output = deque() + + while nodes: + node = nodes.popleft() + + if node.nonterminal == self.Nonterminal.OPERAND: + path = node.value.replace('[', '.[').split('.') + children = [ + self._parse_path_element(name) + for name in path] + if len(children) == 1: + child = children[0] + if child.nonterminal != self.Nonterminal.IDENTIFIER: + output.append(child) + continue + else: + for child in children: + self._assert( + child.nonterminal == self.Nonterminal.IDENTIFIER, + "Cannot use %s in path" % child.text, [node]) + output.append(self.Node( + nonterminal=self.Nonterminal.OPERAND, + kind=self.Kind.PATH, + text=node.text, + value=None, + children=children)) + else: + output.append(node) + return output + + def _parse_path_element(self, name): + reserved = { + 'and': self.Nonterminal.AND, + 'or': self.Nonterminal.OR, + 'in': self.Nonterminal.IN, + 'between': self.Nonterminal.BETWEEN, + 'not': self.Nonterminal.NOT, + } + + functions = { + 'attribute_exists', + 'attribute_not_exists', + 'attribute_type', + 'begins_with', + 'contains', + 'size', + } + + + if name.lower() in reserved: + # e.g. AND + nonterminal = reserved[name.lower()] + return self.Node( + nonterminal=nonterminal, + kind=self.Kind.LITERAL, + text=name, + value=name, + children=[]) + elif name in functions: + # e.g. attribute_exists + return self.Node( + nonterminal=self.Nonterminal.FUNCTION_NAME, + kind=self.Kind.LITERAL, + text=name, + value=name, + children=[]) + elif name.startswith(':'): + # e.g. :value0 + return self.Node( + nonterminal=self.Nonterminal.OPERAND, + kind=self.Kind.EXPRESSION_ATTRIBUTE_VALUE, + text=name, + value=self._lookup_expression_attribute_value(name), + children=[]) + elif name.startswith('#'): + # e.g. #name0 + return self.Node( + nonterminal=self.Nonterminal.IDENTIFIER, + kind=self.Kind.LITERAL, + text=name, + value=self._lookup_expression_attribute_name(name), + children=[]) + elif name.startswith('['): + # e.g. [123] + if not name.endswith(']'): # pragma: no cover + raise ValueError("Bad path element %s" % name) + return self.Node( + nonterminal=self.Nonterminal.IDENTIFIER, + kind=self.Kind.LITERAL, + text=name, + value=int(name[1:-1]), + children=[]) else: - raise NotImplementedError() + # e.g. ItemId + return self.Node( + nonterminal=self.Nonterminal.IDENTIFIER, + kind=self.Kind.LITERAL, + text=name, + value=name, + children=[]) - # Remove all spaces, tbf we could just skip them in the next step. 
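One detail of the lexer above is load-bearing: Python's `re` alternation takes the first branch that matches, not the longest, so the two-character comparators must be listed before their one-character prefixes. A standalone check (illustrative only):

    import re

    wrong = re.compile(r'^(<|<=|>|>=|=|<>)')
    right = re.compile(r'^(<>|<=|>=|=|<|>)')  # long operators first, as above

    assert wrong.match('<= :v').group(1) == '<'   # would mis-lex '<=' as '<'
    assert right.match('<= :v').group(1) == '<='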
- # The number of known options is really small so we can do a fair bit of cheating - expr = list(expr.strip()) + def _lookup_expression_attribute_value(self, name): + return self.expression_attribute_values[name] - # DodgyTokenisation stage 1 - def is_value(val): - return val not in ('<', '>', '=', '(', ')') + def _lookup_expression_attribute_name(self, name): + return self.expression_attribute_names[name] - def contains_keyword(val): - for kw in ('BETWEEN', 'IN', 'AND', 'OR', 'NOT'): - if kw in val: - return kw - return None + # NOTE: The following constructions are ordered from high precedence to low precedence + # according to + # https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.OperatorsAndFunctions.html#Expressions.OperatorsAndFunctions.Precedence + # + # = <> < <= > >= + # IN + # BETWEEN + # attribute_exists attribute_not_exists begins_with contains + # Parentheses + # NOT + # AND + # OR + # + # The grammar is taken from + # https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.OperatorsAndFunctions.html#Expressions.OperatorsAndFunctions.Syntax + # + # condition-expression ::= + # operand comparator operand + # operand BETWEEN operand AND operand + # operand IN ( operand (',' operand (, ...) )) + # function + # condition AND condition + # condition OR condition + # NOT condition + # ( condition ) + # + # comparator ::= + # = + # <> + # < + # <= + # > + # >= + # + # function ::= + # attribute_exists (path) + # attribute_not_exists (path) + # attribute_type (path, type) + # begins_with (path, substr) + # contains (path, operand) + # size (path) - def is_function(val): - return val in ('attribute_exists', 'attribute_not_exists', 'attribute_type', 'begins_with', 'contains', 'size') + def _matches(self, nodes, production): + """Check if the nodes start with the given production. 
- # Does the main part of splitting between sections of characters - tokens = [] - stack = '' - while len(expr) > 0: - current_char = expr.pop(0) + Parameters + ---------- + nodes: list of Node + production: list of str + The name of a Nonterminal, or '*' for anything - if current_char == ' ': - if len(stack) > 0: - tokens.append(stack) - stack = '' - elif current_char == ',': # Split params , - if len(stack) > 0: - tokens.append(stack) - stack = '' - elif is_value(current_char): - stack += current_char + """ + if len(nodes) < len(production): + return False + for i in range(len(production)): + if production[i] == '*': + continue + expected = getattr(self.Nonterminal, production[i]) + if nodes[i].nonterminal != expected: + return False + return True - kw = contains_keyword(stack) - if kw is not None: - # We have a kw in the stack, could be AND or something like 5AND - tmp = stack.replace(kw, '') - if len(tmp) > 0: - tokens.append(tmp) - tokens.append(kw) - stack = '' - else: - if len(stack) > 0: - tokens.append(stack) - tokens.append(current_char) - stack = '' - if len(stack) > 0: - tokens.append(stack) + def _apply_comparator(self, nodes): + """Apply condition := operand comparator operand.""" + output = deque() - def is_op(val): - return val in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') + while nodes: + if self._matches(nodes, ['*', 'COMPARATOR']): + self._assert( + self._matches(nodes, ['OPERAND', 'COMPARATOR', 'OPERAND']), + "Bad comparison", list(nodes)[:3]) + lhs = nodes.popleft() + comparator = nodes.popleft() + rhs = nodes.popleft() + nodes.appendleft(self.Node( + nonterminal=self.Nonterminal.CONDITION, + kind=self.Kind.COMPARISON, + text=" ".join([ + lhs.text, + comparator.text, + rhs.text]), + value=None, + children=[lhs, comparator, rhs])) + else: + output.append(nodes.popleft()) + return output - # DodgyTokenisation stage 2, it groups together some elements to make RPN'ing it later easier. - def handle_token(token, tokens2, token_iterator): - # ok so this essentially groups up some tokens to make later parsing easier, - # when it encounters brackets it will recurse and then unrecurse when RecursionStopIteration is raised. - if token == ')': - raise RecursionStopIteration() # Should be recursive so this should work - elif token == '(': - temp_list = [] - - try: + def _apply_in(self, nodes): + """Apply condition := operand IN ( operand , ... 
).""" + output = deque() + while nodes: + if self._matches(nodes, ['*', 'IN']): + self._assert( + self._matches(nodes, ['OPERAND', 'IN', 'LEFT_PAREN']), + "Bad IN expression", list(nodes)[:3]) + lhs = nodes.popleft() + in_node = nodes.popleft() + left_paren = nodes.popleft() + all_children = [lhs, in_node, left_paren] + rhs = [] while True: - next_token = six.next(token_iterator) - handle_token(next_token, temp_list, token_iterator) - except RecursionStopIteration: - pass # Continue - except StopIteration: - ValueError('Malformed filter expression, type1') - - # Sigh, we only want to group a tuple if it doesnt contain operators - if any([is_op(item) for item in temp_list]): - # Its an expression - tokens2.append('(') - tokens2.extend(temp_list) - tokens2.append(')') + if self._matches(nodes, ['OPERAND', 'COMMA']): + operand = nodes.popleft() + separator = nodes.popleft() + all_children += [operand, separator] + rhs.append(operand) + elif self._matches(nodes, ['OPERAND', 'RIGHT_PAREN']): + operand = nodes.popleft() + separator = nodes.popleft() + all_children += [operand, separator] + rhs.append(operand) + break # Close + else: + self._assert( + False, + "Bad IN expression starting at", nodes) + nodes.appendleft(self.Node( + nonterminal=self.Nonterminal.CONDITION, + kind=self.Kind.IN, + text=" ".join([t.text for t in all_children]), + value=None, + children=[lhs] + rhs)) else: - tokens2.append(tuple(temp_list)) - elif token == 'BETWEEN': - field = tokens2.pop() - # if values map contains a number, it would be a float - # so we need to int() it anyway - op1 = six.next(token_iterator) - op1 = int(values_map.get(op1, op1)) - and_op = six.next(token_iterator) - assert and_op == 'AND' - op2 = six.next(token_iterator) - op2 = int(values_map.get(op2, op2)) - tokens2.append(['between', field, op1, op2]) - elif is_function(token): - function_list = [token] + output.append(nodes.popleft()) + return output - lbracket = six.next(token_iterator) - assert lbracket == '(' - - next_token = six.next(token_iterator) - while next_token != ')': - function_list.append(next_token) - next_token = six.next(token_iterator) - - tokens2.append(function_list) - else: - # Convert tokens back to real types - if token in values_map: - token = values_map[token] - - # Need to join >= <= <> - if len(tokens2) > 0 and ((tokens2[-1] == '>' and token == '=') or (tokens2[-1] == '<' and token == '=') or (tokens2[-1] == '<' and token == '>')): - tokens2.append(tokens2.pop() + token) + def _apply_between(self, nodes): + """Apply condition := operand BETWEEN operand AND operand.""" + output = deque() + while nodes: + if self._matches(nodes, ['*', 'BETWEEN']): + self._assert( + self._matches(nodes, ['OPERAND', 'BETWEEN', 'OPERAND', + 'AND', 'OPERAND']), + "Bad BETWEEN expression", list(nodes)[:5]) + lhs = nodes.popleft() + between_node = nodes.popleft() + low = nodes.popleft() + and_node = nodes.popleft() + high = nodes.popleft() + all_children = [lhs, between_node, low, and_node, high] + nodes.appendleft(self.Node( + nonterminal=self.Nonterminal.CONDITION, + kind=self.Kind.BETWEEN, + text=" ".join([t.text for t in all_children]), + value=None, + children=[lhs, low, high])) else: - tokens2.append(token) + output.append(nodes.popleft()) + return output - tokens2 = [] - token_iterator = iter(tokens) - for token in token_iterator: - handle_token(token, tokens2, token_iterator) - - # Start of the Shunting-Yard algorithm. <-- Proper beast algorithm! 
- def is_number(val): - return val not in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') - - OPS = {'<': 5, '>': 5, '=': 5, '>=': 5, '<=': 5, '<>': 5, 'IN': 8, 'AND': 11, 'OR': 12, 'NOT': 10, 'BETWEEN': 9, '(': 100, ')': 100} - - def shunting_yard(token_list): - output = [] - op_stack = [] - - # Basically takes in an infix notation calculation, converts it to a reverse polish notation where there is no - # ambiguity on which order operators are applied. - while len(token_list) > 0: - token = token_list.pop(0) - - if token == '(': - op_stack.append(token) - elif token == ')': - while len(op_stack) > 0 and op_stack[-1] != '(': - output.append(op_stack.pop()) - lbracket = op_stack.pop() - assert lbracket == '(' - - elif is_number(token): - output.append(token) + def _apply_functions(self, nodes): + """Apply condition := function_name (operand , ...).""" + output = deque() + either_kind = {self.Kind.PATH, self.Kind.EXPRESSION_ATTRIBUTE_VALUE} + expected_argument_kind_map = { + 'attribute_exists': [{self.Kind.PATH}], + 'attribute_not_exists': [{self.Kind.PATH}], + 'attribute_type': [either_kind, {self.Kind.EXPRESSION_ATTRIBUTE_VALUE}], + 'begins_with': [either_kind, either_kind], + 'contains': [either_kind, either_kind], + 'size': [{self.Kind.PATH}], + } + while nodes: + if self._matches(nodes, ['FUNCTION_NAME']): + self._assert( + self._matches(nodes, ['FUNCTION_NAME', 'LEFT_PAREN', + 'OPERAND', '*']), + "Bad function expression at", list(nodes)[:4]) + function_name = nodes.popleft() + left_paren = nodes.popleft() + all_children = [function_name, left_paren] + arguments = [] + while True: + if self._matches(nodes, ['OPERAND', 'COMMA']): + operand = nodes.popleft() + separator = nodes.popleft() + all_children += [operand, separator] + arguments.append(operand) + elif self._matches(nodes, ['OPERAND', 'RIGHT_PAREN']): + operand = nodes.popleft() + separator = nodes.popleft() + all_children += [operand, separator] + arguments.append(operand) + break # Close paren + else: + self._assert( + False, + "Bad function expression", all_children + list(nodes)[:2]) + expected_kinds = expected_argument_kind_map[function_name.value] + self._assert( + len(arguments) == len(expected_kinds), + "Wrong number of arguments in", all_children) + for i in range(len(expected_kinds)): + self._assert( + arguments[i].kind in expected_kinds[i], + "Wrong type for argument %d in" % i, all_children) + if function_name.value == 'size': + nonterminal = self.Nonterminal.OPERAND + else: + nonterminal = self.Nonterminal.CONDITION + nodes.appendleft(self.Node( + nonterminal=nonterminal, + kind=self.Kind.FUNCTION, + text=" ".join([t.text for t in all_children]), + value=None, + children=[function_name] + arguments)) else: - # Must be operator kw + output.append(nodes.popleft()) + return output - # Cheat, NOT is our only RIGHT associative operator, should really have dict of operator associativity - while len(op_stack) > 0 and OPS[op_stack[-1]] <= OPS[token] and op_stack[-1] != 'NOT': - output.append(op_stack.pop()) - op_stack.append(token) - while len(op_stack) > 0: - output.append(op_stack.pop()) + def _apply_parens_and_booleans(self, nodes, left_paren=None): + """Apply condition := ( condition ) and booleans.""" + output = deque() + while nodes: + if self._matches(nodes, ['LEFT_PAREN']): + parsed = self._apply_parens_and_booleans(nodes, left_paren=nodes.popleft()) + self._assert( + len(parsed) >= 1, + "Failed to close parentheses at", nodes) + parens = parsed.popleft() + self._assert( + parens.kind == 
self.Kind.PARENTHESES, + "Failed to close parentheses at", nodes) + output.append(parens) + nodes = parsed + elif self._matches(nodes, ['RIGHT_PAREN']): + self._assert( + left_paren is not None, + "Unmatched ) at", nodes) + close_paren = nodes.popleft() + children = self._apply_booleans(output) + all_children = [left_paren] + list(children) + [close_paren] + return deque([ + self.Node( + nonterminal=self.Nonterminal.CONDITION, + kind=self.Kind.PARENTHESES, + text=" ".join([t.text for t in all_children]), + value=None, + children=list(children), + )] + list(nodes)) + else: + output.append(nodes.popleft()) + + self._assert( + left_paren is None, + "Unmatched ( at", list(output)) + return self._apply_booleans(output) + + def _apply_booleans(self, nodes): + """Apply and, or, and not constructions.""" + nodes = self._apply_not(nodes) + nodes = self._apply_and(nodes) + nodes = self._apply_or(nodes) + # The expression should reduce to a single condition + self._assert( + len(nodes) == 1, + "Unexpected expression at", list(nodes)[1:]) + self._assert( + nodes[0].nonterminal == self.Nonterminal.CONDITION, + "Incomplete condition", nodes) + return nodes + + def _apply_not(self, nodes): + """Apply condition := NOT condition.""" + output = deque() + while nodes: + if self._matches(nodes, ['NOT']): + self._assert( + self._matches(nodes, ['NOT', 'CONDITION']), + "Bad NOT expression", list(nodes)[:2]) + not_node = nodes.popleft() + child = nodes.popleft() + nodes.appendleft(self.Node( + nonterminal=self.Nonterminal.CONDITION, + kind=self.Kind.NOT, + text=" ".join([not_node.text, child.text]), + value=None, + children=[child])) + else: + output.append(nodes.popleft()) return output - output = shunting_yard(tokens2) - - # Hacky function to convert dynamo functions (which are represented as lists) to their Class equivalent - def to_func(val): - if isinstance(val, list): - func_name = val.pop(0) - # Expand rest of the list to arguments - val = FUNC_CLASS[func_name](*val) - - return val - - # Simple reverse polish notation execution. Builts up a nested filter object. 
- # The filter object then takes a dynamo item and returns true/false - stack = [] - for token in output: - if is_op(token): - op_cls = OP_CLASS[token] - - if token == 'NOT': - op1 = stack.pop() - op2 = True + def _apply_and(self, nodes): + """Apply condition := condition AND condition.""" + output = deque() + while nodes: + if self._matches(nodes, ['*', 'AND']): + self._assert( + self._matches(nodes, ['CONDITION', 'AND', 'CONDITION']), + "Bad AND expression", list(nodes)[:3]) + lhs = nodes.popleft() + and_node = nodes.popleft() + rhs = nodes.popleft() + all_children = [lhs, and_node, rhs] + nodes.appendleft(self.Node( + nonterminal=self.Nonterminal.CONDITION, + kind=self.Kind.AND, + text=" ".join([t.text for t in all_children]), + value=None, + children=[lhs, rhs])) else: - op2 = stack.pop() - op1 = stack.pop() + output.append(nodes.popleft()) - stack.append(op_cls(op1, op2)) + return output + + def _apply_or(self, nodes): + """Apply condition := condition OR condition.""" + output = deque() + while nodes: + if self._matches(nodes, ['*', 'OR']): + self._assert( + self._matches(nodes, ['CONDITION', 'OR', 'CONDITION']), + "Bad OR expression", list(nodes)[:3]) + lhs = nodes.popleft() + or_node = nodes.popleft() + rhs = nodes.popleft() + all_children = [lhs, or_node, rhs] + nodes.appendleft(self.Node( + nonterminal=self.Nonterminal.CONDITION, + kind=self.Kind.OR, + text=" ".join([t.text for t in all_children]), + value=None, + children=[lhs, rhs])) + else: + output.append(nodes.popleft()) + + return output + + def _make_operand(self, node): + if node.kind == self.Kind.PATH: + return AttributePath([child.value for child in node.children]) + elif node.kind == self.Kind.EXPRESSION_ATTRIBUTE_VALUE: + return AttributeValue(node.value) + elif node.kind == self.Kind.FUNCTION: + # size() + function_node = node.children[0] + arguments = node.children[1:] + function_name = function_node.value + arguments = [self._make_operand(arg) for arg in arguments] + return FUNC_CLASS[function_name](*arguments) + else: # pragma: no cover + raise ValueError("Unknown operand: %r" % node) + + + def _make_op_condition(self, node): + if node.kind == self.Kind.OR: + lhs, rhs = node.children + return OpOr( + self._make_op_condition(lhs), + self._make_op_condition(rhs)) + elif node.kind == self.Kind.AND: + lhs, rhs = node.children + return OpAnd( + self._make_op_condition(lhs), + self._make_op_condition(rhs)) + elif node.kind == self.Kind.NOT: + child, = node.children + return OpNot(self._make_op_condition(child)) + elif node.kind == self.Kind.PARENTHESES: + child, = node.children + return self._make_op_condition(child) + elif node.kind == self.Kind.FUNCTION: + function_node = node.children[0] + arguments = node.children[1:] + function_name = function_node.value + arguments = [self._make_operand(arg) for arg in arguments] + return FUNC_CLASS[function_name](*arguments) + elif node.kind == self.Kind.BETWEEN: + query, low, high = node.children + return FuncBetween( + self._make_operand(query), + self._make_operand(low), + self._make_operand(high)) + elif node.kind == self.Kind.IN: + query = node.children[0] + possible_values = node.children[1:] + query = self._make_operand(query) + possible_values = [self._make_operand(v) for v in possible_values] + return FuncIn(query, *possible_values) + elif node.kind == self.Kind.COMPARISON: + lhs, comparator, rhs = node.children + return COMPARATOR_CLASS[comparator.value]( + self._make_operand(lhs), + self._make_operand(rhs)) + else: # pragma: no cover + raise ValueError("Unknown 
expression node kind %r" % node.kind) + + def _print_debug(self, nodes): # pragma: no cover + print('ROOT') + for node in nodes: + self._print_node_recursive(node, depth=1) + + def _print_node_recursive(self, node, depth=0): # pragma: no cover + if len(node.children) > 0: + print(' ' * depth, node.nonterminal, node.kind) + for child in node.children: + self._print_node_recursive(child, depth=depth + 1) else: - stack.append(to_func(token)) - - result = stack.pop(0) - if len(stack) > 0: - raise ValueError('Malformed filter expression, type2') - - return result + print(' ' * depth, node.nonterminal, node.kind, node.value) -class Op(object): - """ - Base class for a FilterExpression operator - """ - OP = '' - def __init__(self, lhs, rhs): - self.lhs = lhs - self.rhs = rhs + def _assert(self, condition, message, nodes): + if not condition: + raise ValueError(message + " " + " ".join([t.text for t in nodes])) + + +class Operand(object): + def expr(self, item): + raise NotImplementedError + + def get_type(self, item): + raise NotImplementedError + + +class AttributePath(Operand): + def __init__(self, path): + """Initialize the AttributePath. + + Parameters + ---------- + path: list of int/str - def _lhs(self, item): """ - :type item: moto.dynamodb2.models.Item - """ - lhs = self.lhs - if isinstance(self.lhs, (Op, Func)): - lhs = self.lhs.expr(item) - elif isinstance(self.lhs, six.string_types): - try: - lhs = item.attrs[self.lhs].cast_value - except Exception: - pass + assert len(path) >= 1 + self.path = path - return lhs + def _get_attr(self, item): + if item is None: + return None - def _rhs(self, item): - rhs = self.rhs - if isinstance(self.rhs, (Op, Func)): - rhs = self.rhs.expr(item) - elif isinstance(self.rhs, six.string_types): - try: - rhs = item.attrs[self.rhs].cast_value - except Exception: - pass - return rhs + base = self.path[0] + if base not in item.attrs: + return None + attr = item.attrs[base] + + for name in self.path[1:]: + attr = attr.child_attr(name) + if attr is None: + return None + + return attr def expr(self, item): - return True + attr = self._get_attr(item) + if attr is None: + return None + else: + return attr.cast_value + + def get_type(self, item): + attr = self._get_attr(item) + if attr is None: + return None + else: + return attr.type def __repr__(self): - return '({0} {1} {2})'.format(self.lhs, self.OP, self.rhs) + return ".".join(self.path) -class Func(object): - """ - Base class for a FilterExpression function - """ - FUNC = 'Unknown' +class AttributeValue(Operand): + def __init__(self, value): + """Initialize the AttributePath. + + Parameters + ---------- + value: dict + e.g. 
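The AttributePath operand above resolves one path element at a time through DynamoType.child_attr, returning None instead of raising when any step is missing. An illustrative resolution (the item shape is hypothetical):

    # item attrs: {'foo': DynamoType({'M': {'bar': {'L': [{'S': 'baz'}]}}})}
    # path ['foo', 'bar', 0] resolves as:
    #   item.attrs['foo']  -> the map
    #   .child_attr('bar') -> the list
    #   .child_attr(0)     -> DynamoType({'S': 'baz'})
    # expr(item) -> 'baz'; a missing step short-circuits to None.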
{'N': '1.234'} + + """ + self.type = list(value.keys())[0] + self.value = value[self.type] def expr(self, item): - return True + # TODO: Reuse DynamoType code + if self.type == 'N': + try: + return int(self.value) + except ValueError: + return float(self.value) + elif self.type in ['SS', 'NS', 'BS']: + sub_type = self.type[0] + return set([AttributeValue({sub_type: v}).expr(item) + for v in self.value]) + elif self.type == 'L': + return [AttributeValue(v).expr(item) for v in self.value] + elif self.type == 'M': + return dict([ + (k, AttributeValue(v).expr(item)) + for k, v in self.value.items()]) + else: + return self.value + return self.value + + def get_type(self, item): + return self.type def __repr__(self): - return 'Func(...)'.format(self.FUNC) + return repr(self.value) + + +class OpDefault(Op): + OP = 'NONE' + + def expr(self, item): + """If no condition is specified, always True.""" + return True class OpNot(Op): OP = 'NOT' - def expr(self, item): - lhs = self._lhs(item) + def __init__(self, lhs): + super(OpNot, self).__init__(lhs, None) + def expr(self, item): + lhs = self.lhs.expr(item) return not lhs def __str__(self): @@ -341,8 +940,8 @@ class OpAnd(Op): OP = 'AND' def expr(self, item): - lhs = self._lhs(item) - rhs = self._rhs(item) + lhs = self.lhs.expr(item) + rhs = self.rhs.expr(item) return lhs and rhs @@ -350,8 +949,8 @@ class OpLessThan(Op): OP = '<' def expr(self, item): - lhs = self._lhs(item) - rhs = self._rhs(item) + lhs = self.lhs.expr(item) + rhs = self.rhs.expr(item) return lhs < rhs @@ -359,8 +958,8 @@ class OpGreaterThan(Op): OP = '>' def expr(self, item): - lhs = self._lhs(item) - rhs = self._rhs(item) + lhs = self.lhs.expr(item) + rhs = self.rhs.expr(item) return lhs > rhs @@ -368,8 +967,8 @@ class OpEqual(Op): OP = '=' def expr(self, item): - lhs = self._lhs(item) - rhs = self._rhs(item) + lhs = self.lhs.expr(item) + rhs = self.rhs.expr(item) return lhs == rhs @@ -377,17 +976,17 @@ class OpNotEqual(Op): OP = '<>' def expr(self, item): - lhs = self._lhs(item) - rhs = self._rhs(item) - return lhs == rhs + lhs = self.lhs.expr(item) + rhs = self.rhs.expr(item) + return lhs != rhs class OpLessThanOrEqual(Op): OP = '<=' def expr(self, item): - lhs = self._lhs(item) - rhs = self._rhs(item) + lhs = self.lhs.expr(item) + rhs = self.rhs.expr(item) return lhs <= rhs @@ -395,8 +994,8 @@ class OpGreaterThanOrEqual(Op): OP = '>=' def expr(self, item): - lhs = self._lhs(item) - rhs = self._rhs(item) + lhs = self.lhs.expr(item) + rhs = self.rhs.expr(item) return lhs >= rhs @@ -404,18 +1003,26 @@ class OpOr(Op): OP = 'OR' def expr(self, item): - lhs = self._lhs(item) - rhs = self._rhs(item) - return lhs or rhs + lhs = self.lhs.expr(item) + return lhs or self.rhs.expr(item) -class OpIn(Op): - OP = 'IN' +class Func(object): + """ + Base class for a FilterExpression function + """ + FUNC = 'Unknown' + + def __init__(self, *arguments): + self.arguments = arguments def expr(self, item): - lhs = self._lhs(item) - rhs = self._rhs(item) - return lhs in rhs + raise NotImplementedError + + def __repr__(self): + return '{0}({1})'.format( + self.FUNC, + " ".join([repr(arg) for arg in self.arguments])) class FuncAttrExists(Func): @@ -423,19 +1030,14 @@ class FuncAttrExists(Func): def __init__(self, attribute): self.attr = attribute + super(FuncAttrExists, self).__init__(attribute) def expr(self, item): - return self.attr in item.attrs + return self.attr.get_type(item) is not None -class FuncAttrNotExists(Func): - FUNC = 'attribute_not_exists' - - def __init__(self, attribute): - self.attr 
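For reference, AttributeValue.expr above decodes DynamoDB wire values into plain Python before any comparison runs; the mapping it implements:

    # {'N': '5'}               -> 5        (int when possible)
    # {'N': '1.5'}             -> 1.5      (float fallback)
    # {'SS': ['a', 'b']}       -> {'a', 'b'}
    # {'L': [{'N': '1'}]}      -> [1]
    # {'M': {'k': {'S': 'v'}}} -> {'k': 'v'}
    # anything else (S, BOOL, ...) passes through unchanged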
= attribute - - def expr(self, item): - return self.attr not in item.attrs +def FuncAttrNotExists(attribute): + return OpNot(FuncAttrExists(attribute)) class FuncAttrType(Func): @@ -444,9 +1046,10 @@ class FuncAttrType(Func): def __init__(self, attribute, _type): self.attr = attribute self.type = _type + super(FuncAttrType, self).__init__(attribute, _type) def expr(self, item): - return self.attr in item.attrs and item.attrs[self.attr].type == self.type + return self.attr.get_type(item) == self.type.expr(item) class FuncBeginsWith(Func): @@ -455,9 +1058,14 @@ class FuncBeginsWith(Func): def __init__(self, attribute, substr): self.attr = attribute self.substr = substr + super(FuncBeginsWith, self).__init__(attribute, substr) def expr(self, item): - return self.attr in item.attrs and item.attrs[self.attr].type == 'S' and item.attrs[self.attr].value.startswith(self.substr) + if self.attr.get_type(item) != 'S': + return False + if self.substr.get_type(item) != 'S': + return False + return self.attr.expr(item).startswith(self.substr.expr(item)) class FuncContains(Func): @@ -466,51 +1074,67 @@ class FuncContains(Func): def __init__(self, attribute, operand): self.attr = attribute self.operand = operand + super(FuncContains, self).__init__(attribute, operand) def expr(self, item): - if self.attr not in item.attrs: - return False - - if item.attrs[self.attr].type in ('S', 'SS', 'NS', 'BS', 'L', 'M'): - return self.operand in item.attrs[self.attr].value + if self.attr.get_type(item) in ('S', 'SS', 'NS', 'BS', 'L'): + try: + return self.operand.expr(item) in self.attr.expr(item) + except TypeError: + return False return False +def FuncNotContains(attribute, operand): + return OpNot(FuncContains(attribute, operand)) + + class FuncSize(Func): - FUNC = 'contains' + FUNC = 'size' def __init__(self, attribute): self.attr = attribute + super(FuncSize, self).__init__(attribute) def expr(self, item): - if self.attr not in item.attrs: + if self.attr.get_type(item) is None: raise ValueError('Invalid attribute name {0}'.format(self.attr)) - if item.attrs[self.attr].type in ('S', 'SS', 'NS', 'B', 'BS', 'L', 'M'): - return len(item.attrs[self.attr].value) + if self.attr.get_type(item) in ('S', 'SS', 'NS', 'B', 'BS', 'L', 'M'): + return len(self.attr.expr(item)) raise ValueError('Invalid filter expression') class FuncBetween(Func): - FUNC = 'between' + FUNC = 'BETWEEN' def __init__(self, attribute, start, end): self.attr = attribute self.start = start self.end = end + super(FuncBetween, self).__init__(attribute, start, end) def expr(self, item): - if self.attr not in item.attrs: - raise ValueError('Invalid attribute name {0}'.format(self.attr)) - - return self.start <= item.attrs[self.attr].cast_value <= self.end + return self.start.expr(item) <= self.attr.expr(item) <= self.end.expr(item) -OP_CLASS = { - 'NOT': OpNot, - 'AND': OpAnd, - 'OR': OpOr, - 'IN': OpIn, +class FuncIn(Func): + FUNC = 'IN' + + def __init__(self, attribute, *possible_values): + self.attr = attribute + self.possible_values = possible_values + super(FuncIn, self).__init__(attribute, *possible_values) + + def expr(self, item): + for possible_value in self.possible_values: + if self.attr.expr(item) == possible_value.expr(item): + return True + + return False + + +COMPARATOR_CLASS = { '<': OpLessThan, '>': OpGreaterThan, '<=': OpLessThanOrEqual, diff --git a/moto/dynamodb2/exceptions.py b/moto/dynamodb2/exceptions.py new file mode 100644 index 000000000..9df973292 --- /dev/null +++ b/moto/dynamodb2/exceptions.py @@ -0,0 +1,2 @@ +class 
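Note the refactor just above: attribute_not_exists and NOT_CONTAINS are no longer classes of their own but factories that wrap the positive check in OpNot, so both polarities share one evaluation path:

    cond = FuncAttrNotExists(AttributePath(['Id']))
    # cond is OpNot(FuncAttrExists(AttributePath(['Id']))), so
    # cond.expr(item) is True exactly when the attribute is absent.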
InvalidIndexNameError(ValueError): + pass diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index a4d8feb3c..29e90e7dc 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -5,13 +5,18 @@ import datetime import decimal import json import re +import uuid +import six import boto3 from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import unix_time from moto.core.exceptions import JsonRESTError -from .comparisons import get_comparison_func, get_filter_expression, Op +from .comparisons import get_comparison_func +from .comparisons import get_filter_expression +from .comparisons import get_expected +from .exceptions import InvalidIndexNameError class DynamoJsonEncoder(json.JSONEncoder): @@ -65,9 +70,35 @@ class DynamoType(object): return int(self.value) except ValueError: return float(self.value) + elif self.is_set(): + sub_type = self.type[0] + return set([DynamoType({sub_type: v}).cast_value + for v in self.value]) + elif self.is_list(): + return [DynamoType(v).cast_value for v in self.value] + elif self.is_map(): + return dict([ + (k, DynamoType(v).cast_value) + for k, v in self.value.items()]) else: return self.value + def child_attr(self, key): + """ + Get Map or List children by key. str for Map, int for List. + + Returns DynamoType or None. + """ + if isinstance(key, six.string_types) and self.is_map() and key in self.value: + return DynamoType(self.value[key]) + + if isinstance(key, int) and self.is_list(): + idx = key + if idx >= 0 and idx < len(self.value): + return DynamoType(self.value[idx]) + + return None + def to_json(self): return {self.type: self.value} @@ -85,6 +116,12 @@ class DynamoType(object): def is_set(self): return self.type == 'SS' or self.type == 'NS' or self.type == 'BS' + def is_list(self): + return self.type == 'L' + + def is_map(self): + return self.type == 'M' + def same_type(self, other): return self.type == other.type @@ -135,7 +172,9 @@ class Item(BaseModel): assert len(parts) % 2 == 0, "Mismatched operators and values in update expression: '{}'".format(update_expression) for action, valstr in zip(parts[:-1:2], parts[1::2]): action = action.upper() - values = valstr.split(',') + + # "Should" retain arguments inside (...) + values = re.split(r',(?![^(]*\))', valstr) for value in values: # A Real value value = value.lstrip(":").rstrip(",").strip() @@ -145,13 +184,58 @@ class Item(BaseModel): if action == "REMOVE": self.attrs.pop(value, None) elif action == 'SET': - key, value = value.split("=") + key, value = value.split("=", 1) key = key.strip() value = value.strip() - if value in expression_attribute_values: - self.attrs[key] = DynamoType(expression_attribute_values[value]) + + # If not exists, changes value to a default if needed, else it's the same as it was + if value.startswith('if_not_exists'): + # Function signature + match = re.match(r'.*if_not_exists\s*\((?P<path>.+),\s*(?P<default>.+)\).*', value) + if not match: + raise TypeError + + path, value = match.groups() + + # If it already exists, get its value so we don't overwrite it + if path in self.attrs: + value = self.attrs[path] + + if type(value) != DynamoType: + if value in expression_attribute_values: + value = DynamoType(expression_attribute_values[value]) + else: + value = DynamoType({"S": value}) + + if '.'
not in key: + self.attrs[key] = value else: - self.attrs[key] = DynamoType({"S": value}) + # Handle nested dict updates + key_parts = key.split('.') + attr = key_parts.pop(0) + if attr not in self.attrs: + raise ValueError + + last_val = self.attrs[attr].value + for key_part in key_parts: + # Hack but it'll do, traverses into a dict + last_val_type = list(last_val.keys()) + if last_val_type and last_val_type[0] == 'M': + last_val = last_val['M'] + + if key_part not in last_val: + last_val[key_part] = {'M': {}} + + last_val = last_val[key_part] + + # We have reference to a nested object but we cant just assign to it + current_type = list(last_val.keys())[0] + if current_type == value.type: + last_val[current_type] = value.value + else: + last_val[value.type] = value.value + del last_val[current_type] + elif action == 'ADD': key, value = value.split(" ", 1) key = key.strip() @@ -218,9 +302,9 @@ class Item(BaseModel): self.attrs[attribute_name] = DynamoType({"SS": new_value}) elif isinstance(new_value, dict): self.attrs[attribute_name] = DynamoType({"M": new_value}) - elif update_action['Value'].keys() == ['N']: + elif set(update_action['Value'].keys()) == set(['N']): self.attrs[attribute_name] = DynamoType({"N": new_value}) - elif update_action['Value'].keys() == ['NULL']: + elif set(update_action['Value'].keys()) == set(['NULL']): if attribute_name in self.attrs: del self.attrs[attribute_name] else: @@ -243,11 +327,97 @@ class Item(BaseModel): # TODO: implement other data types raise NotImplementedError( 'ADD not supported for %s' % ', '.join(update_action['Value'].keys())) + elif action == 'DELETE': + if set(update_action['Value'].keys()) == set(['SS']): + existing = self.attrs.get(attribute_name, DynamoType({"SS": {}})) + new_set = set(existing.value).difference(set(new_value)) + self.attrs[attribute_name] = DynamoType({ + "SS": list(new_set) + }) + else: + raise NotImplementedError( + 'ADD not supported for %s' % ', '.join(update_action['Value'].keys())) + else: + raise NotImplementedError( + '%s action not support for update_with_attribute_updates' % action) + + +class StreamRecord(BaseModel): + def __init__(self, table, stream_type, event_name, old, new, seq): + old_a = old.to_json()['Attributes'] if old is not None else {} + new_a = new.to_json()['Attributes'] if new is not None else {} + + rec = old if old is not None else new + keys = {table.hash_key_attr: rec.hash_key.to_json()} + if table.range_key_attr is not None: + keys[table.range_key_attr] = rec.range_key.to_json() + + self.record = { + 'eventID': uuid.uuid4().hex, + 'eventName': event_name, + 'eventSource': 'aws:dynamodb', + 'eventVersion': '1.0', + 'awsRegion': 'us-east-1', + 'dynamodb': { + 'StreamViewType': stream_type, + 'ApproximateCreationDateTime': datetime.datetime.utcnow().isoformat(), + 'SequenceNumber': seq, + 'SizeBytes': 1, + 'Keys': keys + } + } + + if stream_type in ('NEW_IMAGE', 'NEW_AND_OLD_IMAGES'): + self.record['dynamodb']['NewImage'] = new_a + if stream_type in ('OLD_IMAGE', 'NEW_AND_OLD_IMAGES'): + self.record['dynamodb']['OldImage'] = old_a + + # This is a substantial overestimate but it's the easiest to do now + self.record['dynamodb']['SizeBytes'] = len( + json.dumps(self.record['dynamodb'])) + + def to_json(self): + return self.record + + +class StreamShard(BaseModel): + def __init__(self, table): + self.table = table + self.id = 'shardId-00000001541626099285-f35f62ef' + self.starting_sequence_number = 1100000000017454423009 + self.items = [] + self.created_on = datetime.datetime.utcnow() + + def 
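Taken together, the SET handling above now covers if_not_exists defaults and dotted paths into nested maps. An illustrative call through the boto3 resource API (table and attribute names are hypothetical):

    import boto3

    table = boto3.resource('dynamodb', region_name='us-east-1').Table('users')
    table.update_item(
        Key={'username': 'alice'},
        # visits is created on first write and left alone afterwards; the
        # dotted path rewrites one key inside the prefs map (which must
        # already exist, per the ValueError above).
        UpdateExpression='SET visits = if_not_exists(visits, :zero), prefs.theme = :t',
        ExpressionAttributeValues={':zero': 0, ':t': 'dark'},
    )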
to_json(self): + return { + 'ShardId': self.id, + 'SequenceNumberRange': { + 'StartingSequenceNumber': str(self.starting_sequence_number) + } + } + + def add(self, old, new): + t = self.table.stream_specification['StreamViewType'] + if old is None: + event_name = 'INSERT' + elif new is None: + event_name = 'DELETE' + else: + event_name = 'MODIFY' + seq = len(self.items) + self.starting_sequence_number + self.items.append( + StreamRecord(self.table, t, event_name, old, new, seq)) + + def get(self, start, quantity): + start -= self.starting_sequence_number + assert start >= 0 + end = start + quantity + return [i.to_json() for i in self.items[start:end]] class Table(BaseModel): - def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None): + def __init__(self, table_name, schema=None, attr=None, throughput=None, indexes=None, global_indexes=None, streams=None): self.name = table_name self.attr = attr self.schema = schema @@ -278,10 +448,41 @@ class Table(BaseModel): 'TimeToLiveStatus': 'DISABLED' # One of 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED', # 'AttributeName': 'string' # Can contain this } + self.set_stream_specification(streams) + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + params = {} + + if 'KeySchema' in properties: + params['schema'] = properties['KeySchema'] + if 'AttributeDefinitions' in properties: + params['attr'] = properties['AttributeDefinitions'] + if 'GlobalSecondaryIndexes' in properties: + params['global_indexes'] = properties['GlobalSecondaryIndexes'] + if 'ProvisionedThroughput' in properties: + params['throughput'] = properties['ProvisionedThroughput'] + if 'LocalSecondaryIndexes' in properties: + params['indexes'] = properties['LocalSecondaryIndexes'] + + table = dynamodb_backends[region_name].create_table(name=properties['TableName'], **params) + return table def _generate_arn(self, name): return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name + def set_stream_specification(self, streams): + self.stream_specification = streams + if streams and (streams.get('StreamEnabled') or streams.get('StreamViewType')): + self.stream_specification['StreamEnabled'] = True + self.latest_stream_label = datetime.datetime.utcnow().isoformat() + self.stream_shard = StreamShard(self) + else: + self.stream_specification = {'StreamEnabled': False} + self.latest_stream_label = None + self.stream_shard = None + def describe(self, base_key='TableDescription'): results = { base_key: { @@ -298,6 +499,11 @@ class Table(BaseModel): 'LocalSecondaryIndexes': [index for index in self.indexes], } } + if self.stream_specification and self.stream_specification['StreamEnabled']: + results[base_key]['StreamSpecification'] = self.stream_specification + if self.latest_stream_label: + results[base_key]['LatestStreamLabel'] = self.latest_stream_label + results[base_key]['LatestStreamArn'] = self.table_arn + '/stream/' + self.latest_stream_label return results def __len__(self): @@ -331,57 +537,48 @@ class Table(BaseModel): keys.append(range_key) return keys - def put_item(self, item_attrs, expected=None, overwrite=False): + def put_item(self, item_attrs, expected=None, condition_expression=None, + expression_attribute_names=None, + expression_attribute_values=None, overwrite=False): hash_value = DynamoType(item_attrs.get(self.hash_key_attr)) if self.has_range_key: range_value = DynamoType(item_attrs.get(self.range_key_attr)) else: 
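The stream plumbing above activates when a table is created with a StreamSpecification; a sketch using the standard API shape (the table name is hypothetical):

    import boto3

    client = boto3.client('dynamodb', region_name='us-east-1')
    client.create_table(
        TableName='events',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
        StreamSpecification={'StreamEnabled': True, 'StreamViewType': 'NEW_AND_OLD_IMAGES'},
    )
    desc = client.describe_table(TableName='events')['Table']
    # With the changes above, desc now carries LatestStreamLabel and
    # LatestStreamArn, and each put/delete appends a StreamRecord to the shard.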
range_value = None + if expected is None: + expected = {} + lookup_range_value = range_value + else: + expected_range_value = expected.get( + self.range_key_attr, {}).get("Value") + if(expected_range_value is None): + lookup_range_value = range_value + else: + lookup_range_value = DynamoType(expected_range_value) + current = self.get_item(hash_value, lookup_range_value) + item = Item(hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs) if not overwrite: - if expected is None: - expected = {} - lookup_range_value = range_value - else: - expected_range_value = expected.get( - self.range_key_attr, {}).get("Value") - if(expected_range_value is None): - lookup_range_value = range_value - else: - lookup_range_value = DynamoType(expected_range_value) + if not get_expected(expected).expr(current): + raise ValueError('The conditional request failed') + condition_op = get_filter_expression( + condition_expression, + expression_attribute_names, + expression_attribute_values) + if not condition_op.expr(current): + raise ValueError('The conditional request failed') - current = self.get_item(hash_value, lookup_range_value) - - if current is None: - current_attr = {} - elif hasattr(current, 'attrs'): - current_attr = current.attrs - else: - current_attr = current - - for key, val in expected.items(): - if 'Exists' in val and val['Exists'] is False: - if key in current_attr: - raise ValueError("The conditional request failed") - elif key not in current_attr: - raise ValueError("The conditional request failed") - elif 'Value' in val and DynamoType(val['Value']).value != current_attr[key].value: - raise ValueError("The conditional request failed") - elif 'ComparisonOperator' in val: - comparison_func = get_comparison_func( - val['ComparisonOperator']) - dynamo_types = [DynamoType(ele) for ele in val[ - "AttributeValueList"]] - for t in dynamo_types: - if not comparison_func(current_attr[key].value, t.value): - raise ValueError('The conditional request failed') if range_value: self.items[hash_value][range_value] = item else: self.items[hash_value] = item + + if self.stream_shard is not None: + self.stream_shard.add(current, item) + return item def __nonzero__(self): @@ -412,9 +609,14 @@ class Table(BaseModel): def delete_item(self, hash_key, range_key): try: if range_key: - return self.items[hash_key].pop(range_key) + item = self.items[hash_key].pop(range_key) else: - return self.items.pop(hash_key) + item = self.items.pop(hash_key) + + if self.stream_shard is not None: + self.stream_shard.add(item, None) + + return item except KeyError: return None @@ -422,8 +624,9 @@ class Table(BaseModel): exclusive_start_key, scan_index_forward, projection_expression, index_name=None, filter_expression=None, **filter_kwargs): results = [] + if index_name: - all_indexes = (self.global_indexes or []) + (self.indexes or []) + all_indexes = self.all_indexes() indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) if index_name not in indexes_by_name: raise ValueError('Invalid index: %s for table: %s. Available indexes are: %s' % ( @@ -438,24 +641,28 @@ class Table(BaseModel): raise ValueError('Missing Hash Key. 
KeySchema: %s' % index['KeySchema']) - possible_results = [] - for item in self.all_items(): - if not isinstance(item, Item): - continue - item_hash_key = item.attrs.get(index_hash_key['AttributeName']) - if item_hash_key and item_hash_key == hash_key: - possible_results.append(item) - else: - possible_results = [item for item in list(self.all_items()) if isinstance( - item, Item) and item.hash_key == hash_key] - - if index_name: try: index_range_key = [key for key in index[ 'KeySchema'] if key['KeyType'] == 'RANGE'][0] except IndexError: index_range_key = None + possible_results = [] + for item in self.all_items(): + if not isinstance(item, Item): + continue + item_hash_key = item.attrs.get(index_hash_key['AttributeName']) + if index_range_key is None: + if item_hash_key and item_hash_key == hash_key: + possible_results.append(item) + else: + item_range_key = item.attrs.get(index_range_key['AttributeName']) + if item_hash_key and item_hash_key == hash_key and item_range_key: + possible_results.append(item) + else: + possible_results = [item for item in list(self.all_items()) if isinstance( + item, Item) and item.hash_key == hash_key] + if range_comparison: if index_name and not index_range_key: raise ValueError( @@ -491,14 +698,6 @@ class Table(BaseModel): else: results.sort(key=lambda item: item.range_key) - if projection_expression: - expressions = [x.strip() for x in projection_expression.split(',')] - results = copy.deepcopy(results) - for result in results: - for attr in list(result.attrs): - if attr not in expressions: - result.attrs.pop(attr) - if scan_index_forward is False: results.reverse() @@ -507,6 +706,14 @@ class Table(BaseModel): if filter_expression is not None: results = [item for item in results if filter_expression.expr(item)] + if projection_expression: + expressions = [x.strip() for x in projection_expression.split(',')] + results = copy.deepcopy(results) + for result in results: + for attr in list(result.attrs): + if attr not in expressions: + result.attrs.pop(attr) + results, last_evaluated_key = self._trim_results(results, limit, exclusive_start_key) return results, scanned_count, last_evaluated_key @@ -519,11 +726,39 @@ class Table(BaseModel): else: yield hash_set - def scan(self, filters, limit, exclusive_start_key, filter_expression=None): + def all_indexes(self): + return (self.global_indexes or []) + (self.indexes or []) + + def has_idx_items(self, index_name): + + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) + idx = indexes_by_name[index_name] + idx_col_set = set([i['AttributeName'] for i in idx['KeySchema']]) + + for hash_set in self.items.values(): + if self.range_key_attr: + for item in hash_set.values(): + if idx_col_set.issubset(set(item.attrs)): + yield item + else: + if idx_col_set.issubset(set(hash_set.attrs)): + yield hash_set + + def scan(self, filters, limit, exclusive_start_key, filter_expression=None, index_name=None, projection_expression=None): results = [] scanned_count = 0 + all_indexes = self.all_indexes() + indexes_by_name = dict((i['IndexName'], i) for i in all_indexes) - for item in self.all_items(): + if index_name: + if index_name not in indexes_by_name: + raise InvalidIndexNameError('The table does not have the specified index: %s' % index_name) + items = self.has_idx_items(index_name) + else: + items = self.all_items() + + for item in items: scanned_count += 1 passes_all_conditions = True for attribute_name, (comparison_operator, comparison_objs) in filters.items(): @@ -549,11 
+784,19 @@ class Table(BaseModel):
             if passes_all_conditions:
                 results.append(item)
 
+        if projection_expression:
+            expressions = [x.strip() for x in projection_expression.split(',')]
+            results = copy.deepcopy(results)
+            for result in results:
+                for attr in list(result.attrs):
+                    if attr not in expressions:
+                        result.attrs.pop(attr)
+
         results, last_evaluated_key = self._trim_results(results, limit,
-                                                         exclusive_start_key)
+                                                         exclusive_start_key, index_name)
         return results, scanned_count, last_evaluated_key
 
-    def _trim_results(self, results, limit, exclusive_start_key):
+    def _trim_results(self, results, limit, exclusive_start_key, scanned_index=None):
         if exclusive_start_key is not None:
             hash_key = DynamoType(exclusive_start_key.get(self.hash_key_attr))
             range_key = exclusive_start_key.get(self.range_key_attr)
@@ -573,6 +816,14 @@ class Table(BaseModel):
                 if results[-1].range_key is not None:
                     last_evaluated_key[self.range_key_attr] = results[-1].range_key
 
+            if scanned_index:
+                all_indexes = self.all_indexes()
+                indexes_by_name = dict((i['IndexName'], i) for i in all_indexes)
+                idx = indexes_by_name[scanned_index]
+                idx_col_list = [i['AttributeName'] for i in idx['KeySchema']]
+                for col in idx_col_list:
+                    last_evaluated_key[col] = results[-1].attrs[col]
+
             return results, last_evaluated_key
 
     def lookup(self, *args, **kwargs):
@@ -630,6 +881,13 @@ class DynamoDBBackend(BaseBackend):
         table.throughput = throughput
         return table
 
+    def update_table_streams(self, name, stream_specification):
+        table = self.tables[name]
+        if (stream_specification.get('StreamEnabled') or stream_specification.get('StreamViewType')) and table.latest_stream_label:
+            raise ValueError('Table already has stream enabled')
+        table.set_stream_specification(stream_specification)
+        return table
+
     def update_table_global_indexes(self, name, global_index_updates):
         table = self.tables[name]
         gsis_by_name = dict((i['IndexName'], i) for i in table.global_indexes)
@@ -660,14 +918,20 @@ class DynamoDBBackend(BaseBackend):
 
             gsis_by_name[gsi_to_create['IndexName']] = gsi_to_create
 
-        table.global_indexes = gsis_by_name.values()
+        # In Python 3.6, dict.values() returns a dict_values object, but other
+        # parts of the codebase expect a list
+        table.global_indexes = list(gsis_by_name.values())
         return table
 
-    def put_item(self, table_name, item_attrs, expected=None, overwrite=False):
+    def put_item(self, table_name, item_attrs, expected=None,
+                 condition_expression=None, expression_attribute_names=None,
+                 expression_attribute_values=None, overwrite=False):
         table = self.tables.get(table_name)
         if not table:
             return None
-        return table.put_item(item_attrs, expected, overwrite)
+        return table.put_item(item_attrs, expected, condition_expression,
+                              expression_attribute_names,
+                              expression_attribute_values, overwrite)
 
     def get_table_keys_name(self, table_name, keys):
         """
@@ -723,15 +987,12 @@ class DynamoDBBackend(BaseBackend):
         range_values = [DynamoType(range_value)
                         for range_value in range_value_dicts]
 
-        if filter_expression is not None:
-            filter_expression = get_filter_expression(filter_expression, expr_names, expr_values)
-        else:
-            filter_expression = Op(None, None)  # Will always eval to true
+        filter_expression = get_filter_expression(filter_expression, expr_names, expr_values)
 
         return table.query(hash_key, range_comparison, range_values, limit,
                            exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs)
 
-    def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, 
expr_names, expr_values): + def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values, index_name, projection_expression): table = self.tables.get(table_name) if not table: return None, None, None @@ -741,15 +1002,14 @@ class DynamoDBBackend(BaseBackend): dynamo_types = [DynamoType(value) for value in comparison_values] scan_filters[key] = (comparison_operator, dynamo_types) - if filter_expression is not None: - filter_expression = get_filter_expression(filter_expression, expr_names, expr_values) - else: - filter_expression = Op(None, None) # Will always eval to true + filter_expression = get_filter_expression(filter_expression, expr_names, expr_values) - return table.scan(scan_filters, limit, exclusive_start_key, filter_expression) + projection_expression = ','.join([expr_names.get(attr, attr) for attr in projection_expression.replace(' ', '').split(',')]) + + return table.scan(scan_filters, limit, exclusive_start_key, filter_expression, index_name, projection_expression) def update_item(self, table_name, key, update_expression, attribute_updates, expression_attribute_names, - expression_attribute_values, expected=None): + expression_attribute_values, expected=None, condition_expression=None): table = self.get_table(table_name) if all([table.hash_key_attr in key, table.range_key_attr in key]): @@ -768,32 +1028,17 @@ class DynamoDBBackend(BaseBackend): item = table.get_item(hash_value, range_value) - if item is None: - item_attr = {} - elif hasattr(item, 'attrs'): - item_attr = item.attrs - else: - item_attr = item - if not expected: expected = {} - for key, val in expected.items(): - if 'Exists' in val and val['Exists'] is False: - if key in item_attr: - raise ValueError("The conditional request failed") - elif key not in item_attr: - raise ValueError("The conditional request failed") - elif 'Value' in val and DynamoType(val['Value']).value != item_attr[key].value: - raise ValueError("The conditional request failed") - elif 'ComparisonOperator' in val: - comparison_func = get_comparison_func( - val['ComparisonOperator']) - dynamo_types = [DynamoType(ele) for ele in val[ - "AttributeValueList"]] - for t in dynamo_types: - if not comparison_func(item_attr[key].value, t.value): - raise ValueError('The conditional request failed') + if not get_expected(expected).expr(item): + raise ValueError('The conditional request failed') + condition_op = get_filter_expression( + condition_expression, + expression_attribute_names, + expression_attribute_values) + if not condition_op.expr(item): + raise ValueError('The conditional request failed') # Update does not fail on new items, so create one if item is None: diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 952d33efa..d34b176a7 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -5,9 +5,33 @@ import re from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id +from .exceptions import InvalidIndexNameError from .models import dynamodb_backends, dynamo_json_dump +def has_empty_keys_or_values(_dict): + if _dict == "": + return True + if not isinstance(_dict, dict): + return False + return any( + key == '' or value == '' or + has_empty_keys_or_values(value) + for key, value in _dict.items() + ) + + +def get_empty_str_error(): + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return (400, + {'server': 'amazon.com'}, + dynamo_json_dump({'__type': er, + 'message': ('One or more parameter 
values were ' + 'invalid: An AttributeValue may not ' + 'contain an empty string')} + )) + + class DynamoHandler(BaseResponse): def get_endpoint_name(self, headers): @@ -72,8 +96,16 @@ class DynamoHandler(BaseResponse): body = self.body # get the table name table_name = body['TableName'] - # get the throughput - throughput = body["ProvisionedThroughput"] + # check billing mode and get the throughput + if "BillingMode" in body.keys() and body["BillingMode"] == "PAY_PER_REQUEST": + if "ProvisionedThroughput" in body.keys(): + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, + 'ProvisionedThroughput cannot be specified \ + when BillingMode is PAY_PER_REQUEST') + throughput = None + else: # Provisioned (default billing mode) + throughput = body.get("ProvisionedThroughput") # getting the schema key_schema = body['KeySchema'] # getting attribute definition @@ -81,13 +113,16 @@ class DynamoHandler(BaseResponse): # getting the indexes global_indexes = body.get("GlobalSecondaryIndexes", []) local_secondary_indexes = body.get("LocalSecondaryIndexes", []) + # get the stream specification + streams = body.get("StreamSpecification") table = self.dynamodb_backend.create_table(table_name, schema=key_schema, throughput=throughput, attr=attr, global_indexes=global_indexes, - indexes=local_secondary_indexes) + indexes=local_secondary_indexes, + streams=streams) if table is not None: return dynamo_json_dump(table.describe()) else: @@ -140,12 +175,20 @@ class DynamoHandler(BaseResponse): def update_table(self): name = self.body['TableName'] + table = self.dynamodb_backend.get_table(name) if 'GlobalSecondaryIndexUpdates' in self.body: table = self.dynamodb_backend.update_table_global_indexes( name, self.body['GlobalSecondaryIndexUpdates']) if 'ProvisionedThroughput' in self.body: throughput = self.body["ProvisionedThroughput"] table = self.dynamodb_backend.update_table_throughput(name, throughput) + if 'StreamSpecification' in self.body: + try: + table = self.dynamodb_backend.update_table_streams(name, self.body['StreamSpecification']) + except ValueError: + er = 'com.amazonaws.dynamodb.v20111205#ResourceInUseException' + return self.error(er, 'Cannot enable stream') + return dynamo_json_dump(table.describe()) def describe_table(self): @@ -160,17 +203,14 @@ class DynamoHandler(BaseResponse): def put_item(self): name = self.body['TableName'] item = self.body['Item'] + return_values = self.body.get('ReturnValues', 'NONE') - res = re.search('\"\"', json.dumps(item)) - if res: + if return_values not in ('ALL_OLD', 'NONE'): er = 'com.amazonaws.dynamodb.v20111205#ValidationException' - return (400, - {'server': 'amazon.com'}, - dynamo_json_dump({'__type': er, - 'message': ('One or more parameter values were ' - 'invalid: An AttributeValue may not ' - 'contain an empty string')} - )) + return self.error(er, 'Return values set to invalid value') + + if has_empty_keys_or_values(item): + return get_empty_str_error() overwrite = 'Expected' not in self.body if not overwrite: @@ -178,31 +218,27 @@ class DynamoHandler(BaseResponse): else: expected = None + if return_values == 'ALL_OLD': + existing_item = self.dynamodb_backend.get_item(name, item) + if existing_item: + existing_attributes = existing_item.to_json()['Attributes'] + else: + existing_attributes = {} + # Attempt to parse simple ConditionExpressions into an Expected # expression - if not expected: - condition_expression = self.body.get('ConditionExpression') - if condition_expression and 'OR' not in condition_expression: - 
cond_items = [c.strip() - for c in condition_expression.split('AND')] + condition_expression = self.body.get('ConditionExpression') + expression_attribute_names = self.body.get('ExpressionAttributeNames', {}) + expression_attribute_values = self.body.get('ExpressionAttributeValues', {}) - if cond_items: - expected = {} - overwrite = False - exists_re = re.compile('^attribute_exists\((.*)\)$') - not_exists_re = re.compile( - '^attribute_not_exists\((.*)\)$') - - for cond in cond_items: - exists_m = exists_re.match(cond) - not_exists_m = not_exists_re.match(cond) - if exists_m: - expected[exists_m.group(1)] = {'Exists': True} - elif not_exists_m: - expected[not_exists_m.group(1)] = {'Exists': False} + if condition_expression: + overwrite = False try: - result = self.dynamodb_backend.put_item(name, item, expected, overwrite) + result = self.dynamodb_backend.put_item( + name, item, expected, condition_expression, + expression_attribute_names, expression_attribute_values, + overwrite) except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' return self.error(er, 'A condition specified in the operation could not be evaluated.') @@ -213,6 +249,10 @@ class DynamoHandler(BaseResponse): 'TableName': name, 'CapacityUnits': 1 } + if return_values == 'ALL_OLD': + item_dict['Attributes'] = existing_attributes + else: + item_dict.pop('Attributes', None) return dynamo_json_dump(item_dict) else: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' @@ -370,7 +410,7 @@ class DynamoHandler(BaseResponse): range_values = [value_alias_map[ range_key_expression_components[2]]] else: - hash_key_expression = key_condition_expression + hash_key_expression = key_condition_expression.strip('()') range_comparison = None range_values = [] @@ -457,9 +497,10 @@ class DynamoHandler(BaseResponse): filter_expression = self.body.get('FilterExpression') expression_attribute_values = self.body.get('ExpressionAttributeValues', {}) expression_attribute_names = self.body.get('ExpressionAttributeNames', {}) - + projection_expression = self.body.get('ProjectionExpression', '') exclusive_start_key = self.body.get('ExclusiveStartKey') limit = self.body.get("Limit") + index_name = self.body.get('IndexName') try: items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters, @@ -467,7 +508,12 @@ class DynamoHandler(BaseResponse): exclusive_start_key, filter_expression, expression_attribute_names, - expression_attribute_values) + expression_attribute_values, + index_name, + projection_expression) + except InvalidIndexNameError as err: + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, str(err)) except ValueError as err: er = 'com.amazonaws.dynamodb.v20111205#ValidationError' return self.error(er, 'Bad Filter Expression: {0}'.format(err)) @@ -497,7 +543,11 @@ class DynamoHandler(BaseResponse): def delete_item(self): name = self.body['TableName'] keys = self.body['Key'] - return_values = self.body.get('ReturnValues', '') + return_values = self.body.get('ReturnValues', 'NONE') + if return_values not in ('ALL_OLD', 'NONE'): + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, 'Return values set to invalid value') + table = self.dynamodb_backend.get_table(name) if not table: er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException' @@ -514,13 +564,26 @@ class DynamoHandler(BaseResponse): def update_item(self): name = self.body['TableName'] key = self.body['Key'] - update_expression = 
self.body.get('UpdateExpression') + return_values = self.body.get('ReturnValues', 'NONE') + update_expression = self.body.get('UpdateExpression', '').strip() attribute_updates = self.body.get('AttributeUpdates') expression_attribute_names = self.body.get( 'ExpressionAttributeNames', {}) expression_attribute_values = self.body.get( 'ExpressionAttributeValues', {}) existing_item = self.dynamodb_backend.get_item(name, key) + if existing_item: + existing_attributes = existing_item.to_json()['Attributes'] + else: + existing_attributes = {} + + if return_values not in ('NONE', 'ALL_OLD', 'ALL_NEW', 'UPDATED_OLD', + 'UPDATED_NEW'): + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return self.error(er, 'Return values set to invalid value') + + if has_empty_keys_or_values(expression_attribute_values): + return get_empty_str_error() if 'Expected' in self.body: expected = self.body['Expected'] @@ -529,25 +592,9 @@ class DynamoHandler(BaseResponse): # Attempt to parse simple ConditionExpressions into an Expected # expression - if not expected: - condition_expression = self.body.get('ConditionExpression') - if condition_expression and 'OR' not in condition_expression: - cond_items = [c.strip() - for c in condition_expression.split('AND')] - - if cond_items: - expected = {} - exists_re = re.compile('^attribute_exists\((.*)\)$') - not_exists_re = re.compile( - '^attribute_not_exists\((.*)\)$') - - for cond in cond_items: - exists_m = exists_re.match(cond) - not_exists_m = not_exists_re.match(cond) - if exists_m: - expected[exists_m.group(1)] = {'Exists': True} - elif not_exists_m: - expected[not_exists_m.group(1)] = {'Exists': False} + condition_expression = self.body.get('ConditionExpression') + expression_attribute_names = self.body.get('ExpressionAttributeNames', {}) + expression_attribute_values = self.body.get('ExpressionAttributeValues', {}) # Support spaces between operators in an update expression # E.g. 
`a = b + c` -> `a=b+c` @@ -558,7 +605,7 @@ class DynamoHandler(BaseResponse): try: item = self.dynamodb_backend.update_item( name, key, update_expression, attribute_updates, expression_attribute_names, - expression_attribute_values, expected + expression_attribute_values, expected, condition_expression ) except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' @@ -572,8 +619,26 @@ class DynamoHandler(BaseResponse): 'TableName': name, 'CapacityUnits': 0.5 } - if not existing_item: + unchanged_attributes = { + k for k in existing_attributes.keys() + if existing_attributes[k] == item_dict['Attributes'].get(k) + } + changed_attributes = set(existing_attributes.keys()).union(item_dict['Attributes'].keys()).difference(unchanged_attributes) + + if return_values == 'NONE': item_dict['Attributes'] = {} + elif return_values == 'ALL_OLD': + item_dict['Attributes'] = existing_attributes + elif return_values == 'UPDATED_OLD': + item_dict['Attributes'] = { + k: v for k, v in existing_attributes.items() + if k in changed_attributes + } + elif return_values == 'UPDATED_NEW': + item_dict['Attributes'] = { + k: v for k, v in item_dict['Attributes'].items() + if k in changed_attributes + } return dynamo_json_dump(item_dict) diff --git a/moto/dynamodbstreams/__init__.py b/moto/dynamodbstreams/__init__.py new file mode 100644 index 000000000..b35879eba --- /dev/null +++ b/moto/dynamodbstreams/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import dynamodbstreams_backends +from ..core.models import base_decorator + +dynamodbstreams_backend = dynamodbstreams_backends['us-east-1'] +mock_dynamodbstreams = base_decorator(dynamodbstreams_backends) diff --git a/moto/dynamodbstreams/models.py b/moto/dynamodbstreams/models.py new file mode 100644 index 000000000..41cc6e280 --- /dev/null +++ b/moto/dynamodbstreams/models.py @@ -0,0 +1,129 @@ +from __future__ import unicode_literals + +import os +import json +import boto3 +import base64 + +from moto.core import BaseBackend, BaseModel +from moto.dynamodb2.models import dynamodb_backends + + +class ShardIterator(BaseModel): + def __init__(self, streams_backend, stream_shard, shard_iterator_type, sequence_number=None): + self.id = base64.b64encode(os.urandom(472)).decode('utf-8') + self.streams_backend = streams_backend + self.stream_shard = stream_shard + self.shard_iterator_type = shard_iterator_type + if shard_iterator_type == 'TRIM_HORIZON': + self.sequence_number = stream_shard.starting_sequence_number + elif shard_iterator_type == 'LATEST': + self.sequence_number = stream_shard.starting_sequence_number + len(stream_shard.items) + elif shard_iterator_type == 'AT_SEQUENCE_NUMBER': + self.sequence_number = sequence_number + elif shard_iterator_type == 'AFTER_SEQUENCE_NUMBER': + self.sequence_number = sequence_number + 1 + + @property + def arn(self): + return '{}/stream/{}|1|{}'.format( + self.stream_shard.table.table_arn, + self.stream_shard.table.latest_stream_label, + self.id) + + def to_json(self): + return { + 'ShardIterator': self.arn + } + + def get(self, limit=1000): + items = self.stream_shard.get(self.sequence_number, limit) + try: + last_sequence_number = max(i['dynamodb']['SequenceNumber'] for i in items) + new_shard_iterator = ShardIterator(self.streams_backend, + self.stream_shard, + 'AFTER_SEQUENCE_NUMBER', + last_sequence_number) + except ValueError: + new_shard_iterator = ShardIterator(self.streams_backend, + self.stream_shard, + 'AT_SEQUENCE_NUMBER', + self.sequence_number) + + 
self.streams_backend.shard_iterators[new_shard_iterator.arn] = new_shard_iterator + return { + 'NextShardIterator': new_shard_iterator.arn, + 'Records': items + } + + +class DynamoDBStreamsBackend(BaseBackend): + def __init__(self, region): + self.region = region + self.shard_iterators = {} + + def reset(self): + region = self.region + self.__dict__ = {} + self.__init__(region) + + @property + def dynamodb(self): + return dynamodb_backends[self.region] + + def _get_table_from_arn(self, arn): + table_name = arn.split(':', 6)[5].split('/')[1] + return self.dynamodb.get_table(table_name) + + def describe_stream(self, arn): + table = self._get_table_from_arn(arn) + resp = {'StreamDescription': { + 'StreamArn': arn, + 'StreamLabel': table.latest_stream_label, + 'StreamStatus': ('ENABLED' if table.latest_stream_label + else 'DISABLED'), + 'StreamViewType': table.stream_specification['StreamViewType'], + 'CreationRequestDateTime': table.stream_shard.created_on.isoformat(), + 'TableName': table.name, + 'KeySchema': table.schema, + 'Shards': ([table.stream_shard.to_json()] if table.stream_shard + else []) + }} + + return json.dumps(resp) + + def list_streams(self, table_name=None): + streams = [] + for table in self.dynamodb.tables.values(): + if table_name is not None and table.name != table_name: + continue + if table.latest_stream_label: + d = table.describe(base_key='Table') + streams.append({ + 'StreamArn': d['Table']['LatestStreamArn'], + 'TableName': d['Table']['TableName'], + 'StreamLabel': d['Table']['LatestStreamLabel'] + }) + + return json.dumps({'Streams': streams}) + + def get_shard_iterator(self, arn, shard_id, shard_iterator_type, sequence_number=None): + table = self._get_table_from_arn(arn) + assert table.stream_shard.id == shard_id + + shard_iterator = ShardIterator(self, table.stream_shard, + shard_iterator_type, + sequence_number) + self.shard_iterators[shard_iterator.arn] = shard_iterator + + return json.dumps(shard_iterator.to_json()) + + def get_records(self, iterator_arn, limit): + shard_iterator = self.shard_iterators[iterator_arn] + return json.dumps(shard_iterator.get(limit)) + + +available_regions = boto3.session.Session().get_available_regions( + 'dynamodbstreams') +dynamodbstreams_backends = {region: DynamoDBStreamsBackend(region=region) + for region in available_regions} diff --git a/moto/dynamodbstreams/responses.py b/moto/dynamodbstreams/responses.py new file mode 100644 index 000000000..c9c113615 --- /dev/null +++ b/moto/dynamodbstreams/responses.py @@ -0,0 +1,34 @@ +from __future__ import unicode_literals + +from moto.core.responses import BaseResponse + +from .models import dynamodbstreams_backends + + +class DynamoDBStreamsHandler(BaseResponse): + + @property + def backend(self): + return dynamodbstreams_backends[self.region] + + def describe_stream(self): + arn = self._get_param('StreamArn') + return self.backend.describe_stream(arn) + + def list_streams(self): + table_name = self._get_param('TableName') + return self.backend.list_streams(table_name) + + def get_shard_iterator(self): + arn = self._get_param('StreamArn') + shard_id = self._get_param('ShardId') + shard_iterator_type = self._get_param('ShardIteratorType') + return self.backend.get_shard_iterator(arn, shard_id, + shard_iterator_type) + + def get_records(self): + arn = self._get_param('ShardIterator') + limit = self._get_param('Limit') + if limit is None: + limit = 1000 + return self.backend.get_records(arn, limit) diff --git a/moto/dynamodbstreams/urls.py b/moto/dynamodbstreams/urls.py new file 
mode 100644 index 000000000..1d0f94c35 --- /dev/null +++ b/moto/dynamodbstreams/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import DynamoDBStreamsHandler + +url_bases = [ + "https?://streams.dynamodb.(.+).amazonaws.com" +] + +url_paths = { + "{0}/$": DynamoDBStreamsHandler.dispatch, +} diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index 5afb406e3..bb1426612 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -58,6 +58,14 @@ class InvalidKeyPairDuplicateError(EC2ClientError): .format(key)) +class InvalidKeyPairFormatError(EC2ClientError): + + def __init__(self): + super(InvalidKeyPairFormatError, self).__init__( + "InvalidKeyPair.Format", + "Key is not in valid OpenSSH public key format") + + class InvalidVPCIdError(EC2ClientError): def __init__(self, vpc_id): @@ -289,6 +297,15 @@ class InvalidAssociationIdError(EC2ClientError): .format(association_id)) +class InvalidVpcCidrBlockAssociationIdError(EC2ClientError): + + def __init__(self, association_id): + super(InvalidVpcCidrBlockAssociationIdError, self).__init__( + "InvalidVpcCidrBlockAssociationIdError.NotFound", + "The vpc CIDR block association ID '{0}' does not exist" + .format(association_id)) + + class InvalidVPCPeeringConnectionIdError(EC2ClientError): def __init__(self, vpc_peering_connection_id): @@ -324,6 +341,15 @@ class InvalidParameterValueErrorTagNull(EC2ClientError): "Tag value cannot be null. Use empty string instead.") +class InvalidParameterValueErrorUnknownAttribute(EC2ClientError): + + def __init__(self, parameter_value): + super(InvalidParameterValueErrorUnknownAttribute, self).__init__( + "InvalidParameterValue", + "Value ({0}) for parameter attribute is invalid. Unknown attribute." + .format(parameter_value)) + + class InvalidInternetGatewayIdError(EC2ClientError): def __init__(self, internet_gateway_id): @@ -401,3 +427,108 @@ class FilterNotImplementedError(MotoNotImplementedError): super(FilterNotImplementedError, self).__init__( "The filter '{0}' for {1}".format( filter_name, method_name)) + + +class CidrLimitExceeded(EC2ClientError): + + def __init__(self, vpc_id, max_cidr_limit): + super(CidrLimitExceeded, self).__init__( + "CidrLimitExceeded", + "This network '{0}' has met its maximum number of allowed CIDRs: {1}".format(vpc_id, max_cidr_limit) + ) + + +class OperationNotPermitted(EC2ClientError): + + def __init__(self, association_id): + super(OperationNotPermitted, self).__init__( + "OperationNotPermitted", + "The vpc CIDR block with association ID {} may not be disassociated. " + "It is the primary IPv4 CIDR block of the VPC".format(association_id) + ) + + +class InvalidAvailabilityZoneError(EC2ClientError): + + def __init__(self, availability_zone_value, valid_availability_zones): + super(InvalidAvailabilityZoneError, self).__init__( + "InvalidParameterValue", + "Value ({0}) for parameter availabilityZone is invalid. 
" + "Subnets can currently only be created in the following availability zones: {1}.".format(availability_zone_value, valid_availability_zones) + ) + + +class NetworkAclEntryAlreadyExistsError(EC2ClientError): + + def __init__(self, rule_number): + super(NetworkAclEntryAlreadyExistsError, self).__init__( + "NetworkAclEntryAlreadyExists", + "The network acl entry identified by {} already exists.".format(rule_number) + ) + + +class InvalidSubnetRangeError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidSubnetRangeError, self).__init__( + "InvalidSubnet.Range", + "The CIDR '{}' is invalid.".format(cidr_block) + ) + + +class InvalidCIDRBlockParameterError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidCIDRBlockParameterError, self).__init__( + "InvalidParameterValue", + "Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block) + ) + + +class InvalidDestinationCIDRBlockParameterError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidDestinationCIDRBlockParameterError, self).__init__( + "InvalidParameterValue", + "Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(cidr_block) + ) + + +class InvalidSubnetConflictError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidSubnetConflictError, self).__init__( + "InvalidSubnet.Conflict", + "The CIDR '{}' conflicts with another subnet".format(cidr_block) + ) + + +class InvalidVPCRangeError(EC2ClientError): + + def __init__(self, cidr_block): + super(InvalidVPCRangeError, self).__init__( + "InvalidVpc.Range", + "The CIDR '{}' is invalid.".format(cidr_block) + ) + + +# accept exception +class OperationNotPermitted2(EC2ClientError): + def __init__(self, client_region, pcx_id, acceptor_region): + super(OperationNotPermitted2, self).__init__( + "OperationNotPermitted", + "Incorrect region ({0}) specified for this request." + "VPC peering connection {1} must be accepted in region {2}".format(client_region, pcx_id, acceptor_region) + ) + + +# reject exception +class OperationNotPermitted3(EC2ClientError): + def __init__(self, client_region, pcx_id, acceptor_region): + super(OperationNotPermitted3, self).__init__( + "OperationNotPermitted", + "Incorrect region ({0}) specified for this request." 
+ "VPC peering connection {1} must be accepted or rejected in region {2}".format(client_region, + pcx_id, + acceptor_region) + ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py old mode 100755 new mode 100644 index 1f376c96a..79838147e --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -4,6 +4,7 @@ import copy import itertools import ipaddress import json +import os import re import six import warnings @@ -12,63 +13,79 @@ from pkg_resources import resource_filename import boto.ec2 from collections import defaultdict +import weakref from datetime import datetime from boto.ec2.instance import Instance as BotoInstance, Reservation from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest from boto.ec2.launchspecification import LaunchSpecification + from moto.compat import OrderedDict from moto.core import BaseBackend from moto.core.models import Model, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, camelcase_to_underscores from .exceptions import ( - EC2ClientError, + CidrLimitExceeded, DependencyViolationError, - MissingParameterError, + EC2ClientError, + FilterNotImplementedError, + GatewayNotAttachedError, + InvalidAddressError, + InvalidAllocationIdError, + InvalidAMIIdError, + InvalidAMIAttributeItemValueError, + InvalidAssociationIdError, + InvalidAvailabilityZoneError, + InvalidCIDRBlockParameterError, + InvalidCIDRSubnetError, + InvalidCustomerGatewayIdError, + InvalidDestinationCIDRBlockParameterError, + InvalidDHCPOptionsIdError, + InvalidDomainError, + InvalidID, + InvalidInstanceIdError, + InvalidInternetGatewayIdError, + InvalidKeyPairDuplicateError, + InvalidKeyPairFormatError, + InvalidKeyPairNameError, + InvalidNetworkAclIdError, + InvalidNetworkAttachmentIdError, + InvalidNetworkInterfaceIdError, InvalidParameterValueError, InvalidParameterValueErrorTagNull, - InvalidDHCPOptionsIdError, - MalformedDHCPOptionsIdError, - InvalidKeyPairNameError, - InvalidKeyPairDuplicateError, - InvalidInternetGatewayIdError, - GatewayNotAttachedError, - ResourceAlreadyAssociatedError, - InvalidVPCIdError, - InvalidSubnetIdError, - InvalidNetworkInterfaceIdError, - InvalidNetworkAttachmentIdError, - InvalidSecurityGroupDuplicateError, - InvalidSecurityGroupNotFoundError, + InvalidParameterValueErrorUnknownAttribute, InvalidPermissionNotFoundError, InvalidPermissionDuplicateError, InvalidRouteTableIdError, InvalidRouteError, - InvalidInstanceIdError, - InvalidAMIIdError, - InvalidAMIAttributeItemValueError, + InvalidSecurityGroupDuplicateError, + InvalidSecurityGroupNotFoundError, InvalidSnapshotIdError, + InvalidSubnetConflictError, + InvalidSubnetIdError, + InvalidSubnetRangeError, InvalidVolumeIdError, VolumeInUseError, InvalidVolumeAttachmentError, - InvalidDomainError, - InvalidAddressError, - InvalidAllocationIdError, - InvalidAssociationIdError, + InvalidVpcCidrBlockAssociationIdError, InvalidVPCPeeringConnectionIdError, InvalidVPCPeeringConnectionStateTransitionError, - TagLimitExceeded, - InvalidID, - InvalidCIDRSubnetError, - InvalidNetworkAclIdError, + InvalidVPCIdError, + InvalidVPCRangeError, InvalidVpnGatewayIdError, InvalidVpnConnectionIdError, - InvalidCustomerGatewayIdError, - RulesPerSecurityGroupLimitExceededError, + MalformedAMIIdError, + MalformedDHCPOptionsIdError, + MissingParameterError, MotoNotImplementedError, - FilterNotImplementedError -) + NetworkAclEntryAlreadyExistsError, + OperationNotPermitted, + OperationNotPermitted2, + 
OperationNotPermitted3, + ResourceAlreadyAssociatedError, + RulesPerSecurityGroupLimitExceededError, + TagLimitExceeded) from .utils import ( EC2_RESOURCE_TO_PREFIX, EC2_PREFIX_TO_RESOURCE, @@ -81,6 +98,7 @@ from .utils import ( random_instance_id, random_internet_gateway_id, random_ip, + random_ipv6_cidr, random_nat_gateway_id, random_key_pair, random_private_ip, @@ -97,6 +115,7 @@ from .utils import ( random_subnet_association_id, random_volume_id, random_vpc_id, + random_vpc_cidr_association_id, random_vpc_peering_connection_id, generic_filter, is_valid_resource_id, @@ -112,13 +131,16 @@ from .utils import ( random_customer_gateway_id, is_tag_filter, tag_filter_matches, + rsa_public_key_parse, + rsa_public_key_fingerprint ) INSTANCE_TYPES = json.load( open(resource_filename(__name__, 'resources/instance_types.json'), 'r') ) AMIS = json.load( - open(resource_filename(__name__, 'resources/amis.json'), 'r') + open(os.environ.get('MOTO_AMIS_PATH') or resource_filename( + __name__, 'resources/amis.json'), 'r') ) @@ -127,6 +149,8 @@ def utc_date_and_time(): def validate_resource_ids(resource_ids): + if not resource_ids: + raise MissingParameterError(parameter='resourceIdSet') for resource_id in resource_ids: if not is_valid_resource_id(resource_id): raise InvalidID(resource_id=resource_id) @@ -178,14 +202,15 @@ class TaggedEC2Resource(BaseModel): class NetworkInterface(TaggedEC2Resource): def __init__(self, ec2_backend, subnet, private_ip_address, device_index=0, - public_ip_auto_assign=True, group_ids=None): + public_ip_auto_assign=True, group_ids=None, description=None): self.ec2_backend = ec2_backend self.id = random_eni_id() self.device_index = device_index - self.private_ip_address = private_ip_address + self.private_ip_address = private_ip_address or random_private_ip() self.subnet = subnet self.instance = None self.attachment_id = None + self.description = description self.public_ip = None self.public_ip_auto_assign = public_ip_auto_assign @@ -223,11 +248,13 @@ class NetworkInterface(TaggedEC2Resource): subnet = None private_ip_address = properties.get('PrivateIpAddress', None) + description = properties.get('Description', None) network_interface = ec2_backend.create_network_interface( subnet, private_ip_address, - group_ids=security_group_ids + group_ids=security_group_ids, + description=description ) return network_interface @@ -275,6 +302,8 @@ class NetworkInterface(TaggedEC2Resource): return [group.id for group in self._group_set] elif filter_name == 'availability-zone': return self.subnet.availability_zone + elif filter_name == 'description': + return self.description else: return super(NetworkInterface, self).get_filter_value( filter_name, 'DescribeNetworkInterfaces') @@ -285,9 +314,9 @@ class NetworkInterfaceBackend(object): self.enis = {} super(NetworkInterfaceBackend, self).__init__() - def create_network_interface(self, subnet, private_ip_address, group_ids=None, **kwargs): + def create_network_interface(self, subnet, private_ip_address, group_ids=None, description=None, **kwargs): eni = NetworkInterface( - self, subnet, private_ip_address, group_ids=group_ids, **kwargs) + self, subnet, private_ip_address, group_ids=group_ids, description=description, **kwargs) self.enis[eni.id] = eni return eni @@ -320,6 +349,12 @@ class NetworkInterfaceBackend(object): if group.id in _filter_value: enis.append(eni) break + elif _filter == 'private-ip-address:': + enis = [eni for eni in enis if eni.private_ip_address in _filter_value] + elif _filter == 'subnet-id': + enis = [eni for eni in 
enis if eni.subnet.id in _filter_value] + elif _filter == 'description': + enis = [eni for eni in enis if eni.description in _filter_value] else: self.raise_not_implemented_error( "The filter '{0}' for DescribeNetworkInterfaces".format(_filter)) @@ -361,6 +396,10 @@ class NetworkInterfaceBackend(object): class Instance(TaggedEC2Resource, BotoInstance): + VALID_ATTRIBUTES = {'instanceType', 'kernel', 'ramdisk', 'userData', 'disableApiTermination', + 'instanceInitiatedShutdownBehavior', 'rootDeviceName', 'blockDeviceMapping', + 'productCodes', 'sourceDestCheck', 'groupSet', 'ebsOptimized', 'sriovNetSupport'} + def __init__(self, ec2_backend, image_id, user_data, security_groups, **kwargs): super(Instance, self).__init__() self.ec2_backend = ec2_backend @@ -378,21 +417,27 @@ class Instance(TaggedEC2Resource, BotoInstance): self.subnet_id = kwargs.get("subnet_id") in_ec2_classic = not bool(self.subnet_id) self.key_name = kwargs.get("key_name") + self.ebs_optimized = kwargs.get("ebs_optimized", False) self.source_dest_check = "true" self.launch_time = utc_date_and_time() + self.ami_launch_index = kwargs.get("ami_launch_index", 0) self.disable_api_termination = kwargs.get("disable_api_termination", False) + self.instance_initiated_shutdown_behavior = kwargs.get("instance_initiated_shutdown_behavior", "stop") + self.sriov_net_support = "simple" self._spot_fleet_id = kwargs.get("spot_fleet_id", None) - associate_public_ip = kwargs.get("associate_public_ip", False) + self.associate_public_ip = kwargs.get("associate_public_ip", False) if in_ec2_classic: # If we are in EC2-Classic, autoassign a public IP - associate_public_ip = True + self.associate_public_ip = True amis = self.ec2_backend.describe_images(filters={'image-id': image_id}) ami = amis[0] if amis else None if ami is None: warnings.warn('Could not find AMI with image-id:{0}, ' 'in the near future this will ' - 'cause an error'.format(image_id), + 'cause an error.\n' + 'Use ec2_backend.describe_images() to ' + 'find suitable image for your test'.format(image_id), PendingDeprecationWarning) self.platform = ami.platform if ami else None @@ -414,9 +459,9 @@ class Instance(TaggedEC2Resource, BotoInstance): self.vpc_id = subnet.vpc_id self._placement.zone = subnet.availability_zone - if associate_public_ip is None: + if self.associate_public_ip is None: # Mapping public ip hasnt been explicitly enabled or disabled - associate_public_ip = subnet.map_public_ip_on_launch == 'true' + self.associate_public_ip = subnet.map_public_ip_on_launch == 'true' elif placement: self._placement.zone = placement else: @@ -428,7 +473,7 @@ class Instance(TaggedEC2Resource, BotoInstance): self.prep_nics( kwargs.get("nics", {}), private_ip=kwargs.get("private_ip"), - associate_public_ip=associate_public_ip + associate_public_ip=self.associate_public_ip ) def __del__(self): @@ -505,6 +550,22 @@ class Instance(TaggedEC2Resource, BotoInstance): instance.add_tag(tag["Key"], tag["Value"]) return instance + @classmethod + def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + ec2_backend = ec2_backends[region_name] + all_instances = ec2_backend.all_instances() + + # the resource_name for instances is the stack name, logical id, and random suffix separated + # by hyphens. 
So to lookup the instances using the 'aws:cloudformation:logical-id' tag, we need to + # extract the logical-id from the resource_name + logical_id = resource_name.split('-')[1] + + for instance in all_instances: + instance_tags = instance.get_tags() + for tag in instance_tags: + if tag['key'] == 'aws:cloudformation:logical-id' and tag['value'] == logical_id: + instance.delete(region_name) + @property def physical_resource_id(self): return self.id @@ -693,6 +754,7 @@ class InstanceBackend(object): instance_tags = tags.get('instance', {}) for index in range(count): + kwargs["ami_launch_index"] = index new_instance = Instance( self, image_id, @@ -745,14 +807,22 @@ class InstanceBackend(object): setattr(instance, key, value) return instance - def modify_instance_security_groups(self, instance_id, new_group_list): + def modify_instance_security_groups(self, instance_id, new_group_id_list): instance = self.get_instance(instance_id) + new_group_list = [] + for new_group_id in new_group_id_list: + new_group_list.append(self.get_security_group_from_id(new_group_id)) setattr(instance, 'security_groups', new_group_list) return instance - def describe_instance_attribute(self, instance_id, key): - if key == 'group_set': + def describe_instance_attribute(self, instance_id, attribute): + if attribute not in Instance.VALID_ATTRIBUTES: + raise InvalidParameterValueErrorUnknownAttribute(attribute) + + if attribute == 'groupSet': key = 'security_groups' + else: + key = camelcase_to_underscores(attribute) instance = self.get_instance(instance_id) value = getattr(instance, key) return instance, value @@ -878,7 +948,14 @@ class KeyPairBackend(object): def import_key_pair(self, key_name, public_key_material): if key_name in self.keypairs: raise InvalidKeyPairDuplicateError(key_name) - keypair = KeyPair(key_name, **random_key_pair()) + + try: + rsa_public_key = rsa_public_key_parse(public_key_material) + except ValueError: + raise InvalidKeyPairFormatError() + + fingerprint = rsa_public_key_fingerprint(rsa_public_key) + keypair = KeyPair(key_name, material=public_key_material, fingerprint=fingerprint) self.keypairs[key_name] = keypair return keypair @@ -1011,12 +1088,11 @@ class TagBackend(object): class Ami(TaggedEC2Resource): def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None, - name=None, description=None, owner_id=None, - + name=None, description=None, owner_id=111122223333, public=False, virtualization_type=None, architecture=None, state='available', creation_date=None, platform=None, image_type='machine', image_location=None, hypervisor=None, - root_device_type=None, root_device_name=None, sriov='simple', + root_device_type='standard', root_device_name='/dev/sda1', sriov='simple', region_name='us-east-1a' ): self.ec2_backend = ec2_backend @@ -1069,7 +1145,8 @@ class Ami(TaggedEC2Resource): # AWS auto-creates these, we should reflect the same. 
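+        # The scratch volume created below exists only to back the snapshot: the
+        # snapshot is now created with the AMI's owner_id, and the volume itself
+        # is deleted straight away. A rough sketch of the observable effect
+        # (variable names hypothetical):
+        #
+        #   ami = ec2_backend.create_image(instance.id, name='test-ami')
+        #   ec2_backend.get_snapshot(ami.ebs_snapshot.id)  # snapshot is kept
+        #   # ...but no extra 15 GiB scratch volume lingers in ec2_backend.volumes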
volume = self.ec2_backend.create_volume(15, region_name) self.ebs_snapshot = self.ec2_backend.create_snapshot( - volume.id, "Auto-created snapshot for AMI %s" % self.id) + volume.id, "Auto-created snapshot for AMI %s" % self.id, owner_id) + self.ec2_backend.delete_volume(volume.id) @property def is_public(self): @@ -1089,7 +1166,7 @@ class Ami(TaggedEC2Resource): elif filter_name == 'image-id': return self.id elif filter_name == 'is-public': - return str(self.is_public) + return self.is_public_string elif filter_name == 'state': return self.state elif filter_name == 'name': @@ -1102,6 +1179,9 @@ class Ami(TaggedEC2Resource): class AmiBackend(object): + + AMI_REGEX = re.compile("ami-[a-z0-9]+") + def __init__(self): self.amis = {} @@ -1114,12 +1194,14 @@ class AmiBackend(object): ami_id = ami['ami_id'] self.amis[ami_id] = Ami(self, **ami) - def create_image(self, instance_id, name=None, description=None, owner_id=None): + def create_image(self, instance_id, name=None, description=None, context=None): # TODO: check that instance exists and pull info from it. ami_id = random_ami_id() instance = self.get_instance(instance_id) + ami = Ami(self, ami_id, instance=instance, source_ami=None, - name=name, description=description, owner_id=owner_id) + name=name, description=description, + owner_id=context.get_current_user() if context else '111122223333') self.amis[ami_id] = ami return ami @@ -1132,28 +1214,43 @@ class AmiBackend(object): self.amis[ami_id] = ami return ami - def describe_images(self, ami_ids=(), filters=None, exec_users=None, owners=None): + def describe_images(self, ami_ids=(), filters=None, exec_users=None, owners=None, + context=None): images = self.amis.values() - # Limit images by launch permissions - if exec_users: - tmp_images = [] - for ami in images: - for user_id in exec_users: - if user_id in ami.launch_permission_users: - tmp_images.append(ami) - images = tmp_images + if len(ami_ids): + # boto3 seems to default to just searching based on ami ids if that parameter is passed + # and if no images are found, it raises an errors + malformed_ami_ids = [ami_id for ami_id in ami_ids if not ami_id.startswith('ami-')] + if malformed_ami_ids: + raise MalformedAMIIdError(malformed_ami_ids) - # Limit by owner ids - if owners: - images = [ami for ami in images if ami.owner_id in owners] - - if ami_ids: images = [ami for ami in images if ami.id in ami_ids] + if len(images) == 0: + raise InvalidAMIIdError(ami_ids) + else: + # Limit images by launch permissions + if exec_users: + tmp_images = [] + for ami in images: + for user_id in exec_users: + if user_id in ami.launch_permission_users: + tmp_images.append(ami) + images = tmp_images + + # Limit by owner ids + if owners: + # support filtering by Owners=['self'] + owners = list(map( + lambda o: context.get_current_user() + if context and o == 'self' else o, + owners)) + images = [ami for ami in images if ami.owner_id in owners] + + # Generic filters + if filters: + return generic_filter(filters, images) - # Generic filters - if filters: - return generic_filter(filters, images) return images def deregister_image(self, ami_id): @@ -1219,20 +1316,117 @@ class Region(object): class Zone(object): - def __init__(self, name, region_name): + def __init__(self, name, region_name, zone_id): self.name = name self.region_name = region_name + self.zone_id = zone_id class RegionsAndZonesBackend(object): regions = [Region(ri.name, ri.endpoint) for ri in boto.ec2.regions()] - zones = dict( - (region, [Zone(region + c, region) for c in 'abc']) - for 
region in [r.name for r in regions]) + zones = { + 'ap-south-1': [ + Zone(region_name="ap-south-1", name="ap-south-1a", zone_id="aps1-az1"), + Zone(region_name="ap-south-1", name="ap-south-1b", zone_id="aps1-az3") + ], + 'eu-west-3': [ + Zone(region_name="eu-west-3", name="eu-west-3a", zone_id="euw3-az1"), + Zone(region_name="eu-west-3", name="eu-west-3b", zone_id="euw3-az2"), + Zone(region_name="eu-west-3", name="eu-west-3c", zone_id="euw3-az3") + ], + 'eu-north-1': [ + Zone(region_name="eu-north-1", name="eu-north-1a", zone_id="eun1-az1"), + Zone(region_name="eu-north-1", name="eu-north-1b", zone_id="eun1-az2"), + Zone(region_name="eu-north-1", name="eu-north-1c", zone_id="eun1-az3") + ], + 'eu-west-2': [ + Zone(region_name="eu-west-2", name="eu-west-2a", zone_id="euw2-az2"), + Zone(region_name="eu-west-2", name="eu-west-2b", zone_id="euw2-az3"), + Zone(region_name="eu-west-2", name="eu-west-2c", zone_id="euw2-az1") + ], + 'eu-west-1': [ + Zone(region_name="eu-west-1", name="eu-west-1a", zone_id="euw1-az3"), + Zone(region_name="eu-west-1", name="eu-west-1b", zone_id="euw1-az1"), + Zone(region_name="eu-west-1", name="eu-west-1c", zone_id="euw1-az2") + ], + 'ap-northeast-3': [ + Zone(region_name="ap-northeast-3", name="ap-northeast-2a", zone_id="apne3-az1") + ], + 'ap-northeast-2': [ + Zone(region_name="ap-northeast-2", name="ap-northeast-2a", zone_id="apne2-az1"), + Zone(region_name="ap-northeast-2", name="ap-northeast-2c", zone_id="apne2-az3") + ], + 'ap-northeast-1': [ + Zone(region_name="ap-northeast-1", name="ap-northeast-1a", zone_id="apne1-az4"), + Zone(region_name="ap-northeast-1", name="ap-northeast-1c", zone_id="apne1-az1"), + Zone(region_name="ap-northeast-1", name="ap-northeast-1d", zone_id="apne1-az2") + ], + 'sa-east-1': [ + Zone(region_name="sa-east-1", name="sa-east-1a", zone_id="sae1-az1"), + Zone(region_name="sa-east-1", name="sa-east-1c", zone_id="sae1-az3") + ], + 'ca-central-1': [ + Zone(region_name="ca-central-1", name="ca-central-1a", zone_id="cac1-az1"), + Zone(region_name="ca-central-1", name="ca-central-1b", zone_id="cac1-az2") + ], + 'ap-southeast-1': [ + Zone(region_name="ap-southeast-1", name="ap-southeast-1a", zone_id="apse1-az1"), + Zone(region_name="ap-southeast-1", name="ap-southeast-1b", zone_id="apse1-az2"), + Zone(region_name="ap-southeast-1", name="ap-southeast-1c", zone_id="apse1-az3") + ], + 'ap-southeast-2': [ + Zone(region_name="ap-southeast-2", name="ap-southeast-2a", zone_id="apse2-az1"), + Zone(region_name="ap-southeast-2", name="ap-southeast-2b", zone_id="apse2-az3"), + Zone(region_name="ap-southeast-2", name="ap-southeast-2c", zone_id="apse2-az2") + ], + 'eu-central-1': [ + Zone(region_name="eu-central-1", name="eu-central-1a", zone_id="euc1-az2"), + Zone(region_name="eu-central-1", name="eu-central-1b", zone_id="euc1-az3"), + Zone(region_name="eu-central-1", name="eu-central-1c", zone_id="euc1-az1") + ], + 'us-east-1': [ + Zone(region_name="us-east-1", name="us-east-1a", zone_id="use1-az6"), + Zone(region_name="us-east-1", name="us-east-1b", zone_id="use1-az1"), + Zone(region_name="us-east-1", name="us-east-1c", zone_id="use1-az2"), + Zone(region_name="us-east-1", name="us-east-1d", zone_id="use1-az4"), + Zone(region_name="us-east-1", name="us-east-1e", zone_id="use1-az3"), + Zone(region_name="us-east-1", name="us-east-1f", zone_id="use1-az5") + ], + 'us-east-2': [ + Zone(region_name="us-east-2", name="us-east-2a", zone_id="use2-az1"), + Zone(region_name="us-east-2", name="us-east-2b", zone_id="use2-az2"), + Zone(region_name="us-east-2", 
name="us-east-2c", zone_id="use2-az3") + ], + 'us-west-1': [ + Zone(region_name="us-west-1", name="us-west-1a", zone_id="usw1-az3"), + Zone(region_name="us-west-1", name="us-west-1b", zone_id="usw1-az1") + ], + 'us-west-2': [ + Zone(region_name="us-west-2", name="us-west-2a", zone_id="usw2-az2"), + Zone(region_name="us-west-2", name="us-west-2b", zone_id="usw2-az1"), + Zone(region_name="us-west-2", name="us-west-2c", zone_id="usw2-az3") + ], + 'cn-north-1': [ + Zone(region_name="cn-north-1", name="cn-north-1a", zone_id="cnn1-az1"), + Zone(region_name="cn-north-1", name="cn-north-1b", zone_id="cnn1-az2") + ], + 'us-gov-west-1': [ + Zone(region_name="us-gov-west-1", name="us-gov-west-1a", zone_id="usgw1-az1"), + Zone(region_name="us-gov-west-1", name="us-gov-west-1b", zone_id="usgw1-az2"), + Zone(region_name="us-gov-west-1", name="us-gov-west-1c", zone_id="usgw1-az3") + ] + } - def describe_regions(self): - return self.regions + def describe_regions(self, region_names=[]): + if len(region_names) == 0: + return self.regions + ret = [] + for name in region_names: + for region in self.regions: + if region.name == name: + ret.append(region) + return ret def describe_availability_zones(self): return self.zones[self.region_name] @@ -1272,7 +1466,7 @@ class SecurityGroup(TaggedEC2Resource): self.name = name self.description = description self.ingress_rules = [] - self.egress_rules = [SecurityRule(-1, -1, -1, ['0.0.0.0/0'], [])] + self.egress_rules = [SecurityRule(-1, None, None, ['0.0.0.0/0'], [])] self.enis = {} self.vpc_id = vpc_id self.owner_id = "123456789012" @@ -1663,6 +1857,7 @@ class SecurityGroupIngress(object): group_id = properties.get('GroupId') ip_protocol = properties.get("IpProtocol") cidr_ip = properties.get("CidrIp") + cidr_ipv6 = properties.get("CidrIpv6") from_port = properties.get("FromPort") source_security_group_id = properties.get("SourceSecurityGroupId") source_security_group_name = properties.get("SourceSecurityGroupName") @@ -1671,7 +1866,7 @@ class SecurityGroupIngress(object): to_port = properties.get("ToPort") assert group_id or group_name - assert source_security_group_name or cidr_ip or source_security_group_id + assert source_security_group_name or cidr_ip or cidr_ipv6 or source_security_group_id assert ip_protocol if source_security_group_id: @@ -1787,13 +1982,15 @@ class Volume(TaggedEC2Resource): return self.id elif filter_name == 'encrypted': return str(self.encrypted).lower() + elif filter_name == 'availability-zone': + return self.zone.name else: return super(Volume, self).get_filter_value( filter_name, 'DescribeVolumes') class Snapshot(TaggedEC2Resource): - def __init__(self, ec2_backend, snapshot_id, volume, description, encrypted=False): + def __init__(self, ec2_backend, snapshot_id, volume, description, encrypted=False, owner_id='123456789012'): self.id = snapshot_id self.volume = volume self.description = description @@ -1802,6 +1999,7 @@ class Snapshot(TaggedEC2Resource): self.ec2_backend = ec2_backend self.status = 'completed' self.encrypted = encrypted + self.owner_id = owner_id def get_filter_value(self, filter_name): if filter_name == 'description': @@ -1818,6 +2016,8 @@ class Snapshot(TaggedEC2Resource): return str(self.encrypted).lower() elif filter_name == 'status': return self.status + elif filter_name == 'owner-id': + return self.owner_id else: return super(Snapshot, self).get_filter_value( filter_name, 'DescribeSnapshots') @@ -1896,11 +2096,13 @@ class EBSBackend(object): volume.attachment = None return old_attachment - def create_snapshot(self, 
volume_id, description): + def create_snapshot(self, volume_id, description, owner_id=None): snapshot_id = random_snapshot_id() volume = self.get_volume(volume_id) - snapshot = Snapshot(self, snapshot_id, volume, - description, volume.encrypted) + params = [self, snapshot_id, volume, description, volume.encrypted] + if owner_id: + params.append(owner_id) + snapshot = Snapshot(*params) self.snapshots[snapshot_id] = snapshot return snapshot @@ -1916,6 +2118,15 @@ class EBSBackend(object): matches = generic_filter(filters, matches) return matches + def copy_snapshot(self, source_snapshot_id, source_region, description=None): + source_snapshot = ec2_backends[source_region].describe_snapshots( + snapshot_ids=[source_snapshot_id])[0] + snapshot_id = random_snapshot_id() + snapshot = Snapshot(self, snapshot_id, volume=source_snapshot.volume, + description=description, encrypted=source_snapshot.encrypted) + self.snapshots[snapshot_id] = snapshot + return snapshot + def get_snapshot(self, snapshot_id): snapshot = self.snapshots.get(snapshot_id, None) if not snapshot: @@ -1955,10 +2166,13 @@ class EBSBackend(object): class VPC(TaggedEC2Resource): - def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default'): + def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default', + amazon_provided_ipv6_cidr_block=False): + self.ec2_backend = ec2_backend self.id = vpc_id self.cidr_block = cidr_block + self.cidr_block_association_set = {} self.dhcp_options = None self.state = 'available' self.instance_tenancy = instance_tenancy @@ -1968,6 +2182,10 @@ class VPC(TaggedEC2Resource): # or VPCs created using the wizard of the VPC console self.enable_dns_hostnames = 'true' if is_default else 'false' + self.associate_vpc_cidr_block(cidr_block) + if amazon_provided_ipv6_cidr_block: + self.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_block) + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -1977,6 +2195,11 @@ class VPC(TaggedEC2Resource): cidr_block=properties['CidrBlock'], instance_tenancy=properties.get('InstanceTenancy', 'default') ) + for tag in properties.get("Tags", []): + tag_key = tag["Key"] + tag_value = tag["Value"] + vpc.add_tag(tag_key, tag_value) + return vpc @property @@ -1988,6 +2211,12 @@ class VPC(TaggedEC2Resource): return self.id elif filter_name in ('cidr', 'cidr-block', 'cidrBlock'): return self.cidr_block + elif filter_name in ('cidr-block-association.cidr-block', 'ipv6-cidr-block-association.ipv6-cidr-block'): + return [c['cidr_block'] for c in self.get_cidr_block_association_set(ipv6='ipv6' in filter_name)] + elif filter_name in ('cidr-block-association.association-id', 'ipv6-cidr-block-association.association-id'): + return self.cidr_block_association_set.keys() + elif filter_name in ('cidr-block-association.state', 'ipv6-cidr-block-association.state'): + return [c['cidr_block_state']['state'] for c in self.get_cidr_block_association_set(ipv6='ipv6' in filter_name)] elif filter_name in ('instance_tenancy', 'InstanceTenancy'): return self.instance_tenancy elif filter_name in ('is-default', 'isDefault'): @@ -1999,19 +2228,63 @@ class VPC(TaggedEC2Resource): return None return self.dhcp_options.id else: - return super(VPC, self).get_filter_value( - filter_name, 'DescribeVpcs') + return super(VPC, self).get_filter_value(filter_name, 'DescribeVpcs') + + def 
associate_vpc_cidr_block(self, cidr_block, amazon_provided_ipv6_cidr_block=False): + max_associations = 5 if not amazon_provided_ipv6_cidr_block else 1 + + if len(self.get_cidr_block_association_set(amazon_provided_ipv6_cidr_block)) >= max_associations: + raise CidrLimitExceeded(self.id, max_associations) + + association_id = random_vpc_cidr_association_id() + + association_set = { + 'association_id': association_id, + 'cidr_block_state': {'state': 'associated', 'StatusMessage': ''} + } + + association_set['cidr_block'] = random_ipv6_cidr() if amazon_provided_ipv6_cidr_block else cidr_block + self.cidr_block_association_set[association_id] = association_set + return association_set + + def disassociate_vpc_cidr_block(self, association_id): + if self.cidr_block == self.cidr_block_association_set.get(association_id, {}).get('cidr_block'): + raise OperationNotPermitted(association_id) + + response = self.cidr_block_association_set.pop(association_id, {}) + if response: + response['vpc_id'] = self.id + response['cidr_block_state']['state'] = 'disassociating' + return response + + def get_cidr_block_association_set(self, ipv6=False): + return [c for c in self.cidr_block_association_set.values() if ('::/' if ipv6 else '.') in c.get('cidr_block')] class VPCBackend(object): + vpc_refs = defaultdict(set) + def __init__(self): self.vpcs = {} + self.vpc_refs[self.__class__].add(weakref.ref(self)) super(VPCBackend, self).__init__() - def create_vpc(self, cidr_block, instance_tenancy='default'): + @classmethod + def get_vpc_refs(cls): + for inst_ref in cls.vpc_refs[cls]: + inst = inst_ref() + if inst is not None: + yield inst + + def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False): vpc_id = random_vpc_id() - vpc = VPC(self, vpc_id, cidr_block, len( - self.vpcs) == 0, instance_tenancy) + try: + vpc_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False) + except ValueError: + raise InvalidCIDRBlockParameterError(cidr_block) + if vpc_cidr_block.prefixlen < 16 or vpc_cidr_block.prefixlen > 28: + raise InvalidVPCRangeError(cidr_block) + vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block) self.vpcs[vpc_id] = vpc # AWS creates a default main route table and security group. 
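
As a usage sketch for the CIDR-association plumbing above (illustrative only, not part of the patch; the test name, region, and CIDR values are invented), the new endpoints can be driven end-to-end through boto3:

import boto3
from moto import mock_ec2

@mock_ec2
def test_vpc_secondary_cidr_round_trip():
    client = boto3.client('ec2', region_name='us-east-1')
    vpc_id = client.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
    # The backend caps IPv4 associations (primary included) at five,
    # raising CidrLimitExceeded beyond that.
    assoc = client.associate_vpc_cidr_block(VpcId=vpc_id, CidrBlock='10.1.0.0/16')
    assoc_id = assoc['CidrBlockAssociation']['AssociationId']
    # Only secondary blocks may be disassociated; the primary raises
    # OperationNotPermitted. The association state flips to 'disassociating'.
    client.disassociate_vpc_cidr_block(AssociationId=assoc_id)
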
@@ -2032,6 +2305,13 @@ class VPCBackend(object):
             raise InvalidVPCIdError(vpc_id)
         return self.vpcs.get(vpc_id)

+    # get a VPC by ID from the backend that serves the given region
+    def get_cross_vpc(self, vpc_id, peer_region):
+        for backend in self.get_vpc_refs():
+            if backend.region_name == peer_region:
+                match_vpc = backend.get_vpc(vpc_id)
+                return match_vpc
+
     def get_all_vpcs(self, vpc_ids=None, filters=None):
         matches = self.vpcs.values()
         if vpc_ids:
@@ -2084,12 +2364,28 @@ class VPCBackend(object):
         else:
             raise InvalidParameterValueError(attr_name)

+    def disassociate_vpc_cidr_block(self, association_id):
+        for vpc in self.vpcs.values():
+            response = vpc.disassociate_vpc_cidr_block(association_id)
+            if response:
+                return response
+        else:
+            raise InvalidVpcCidrBlockAssociationIdError(association_id)
+
+    def associate_vpc_cidr_block(self, vpc_id, cidr_block, amazon_provided_ipv6_cidr_block):
+        vpc = self.get_vpc(vpc_id)
+        return vpc.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block)
+

 class VPCPeeringConnectionStatus(object):
     def __init__(self, code='initiating-request', message=''):
         self.code = code
         self.message = message

+    def deleted(self):
+        self.code = 'deleted'
+        self.message = 'Deleted by {deleter ID}'
+
     def initiating(self):
         self.code = 'initiating-request'
         self.message = 'Initiating Request to {accepter ID}'
@@ -2132,15 +2428,31 @@ class VPCPeeringConnection(TaggedEC2Resource):


 class VPCPeeringConnectionBackend(object):
+    # track backend instances for cross-region VPC peering lookups
+    vpc_pcx_refs = defaultdict(set)
+
     def __init__(self):
         self.vpc_pcxs = {}
+        self.vpc_pcx_refs[self.__class__].add(weakref.ref(self))
         super(VPCPeeringConnectionBackend, self).__init__()

+    @classmethod
+    def get_vpc_pcx_refs(cls):
+        for inst_ref in cls.vpc_pcx_refs[cls]:
+            inst = inst_ref()
+            if inst is not None:
+                yield inst
+
     def create_vpc_peering_connection(self, vpc, peer_vpc):
         vpc_pcx_id = random_vpc_peering_connection_id()
         vpc_pcx = VPCPeeringConnection(vpc_pcx_id, vpc, peer_vpc)
         vpc_pcx._status.pending()
         self.vpc_pcxs[vpc_pcx_id] = vpc_pcx
+        # mirror cross-region peering info into the accepter region's backend
+        if vpc.ec2_backend.region_name != peer_vpc.ec2_backend.region_name:
+            for vpc_pcx_cx in peer_vpc.ec2_backend.get_vpc_pcx_refs():
+                if vpc_pcx_cx.region_name == peer_vpc.ec2_backend.region_name:
+                    vpc_pcx_cx.vpc_pcxs[vpc_pcx_id] = vpc_pcx
         return vpc_pcx

     def get_all_vpc_peering_connections(self):
@@ -2152,13 +2464,17 @@
         return self.vpc_pcxs.get(vpc_pcx_id)

     def delete_vpc_peering_connection(self, vpc_pcx_id):
-        deleted = self.vpc_pcxs.pop(vpc_pcx_id, None)
-        if not deleted:
-            raise InvalidVPCPeeringConnectionIdError(vpc_pcx_id)
+        deleted = self.get_vpc_peering_connection(vpc_pcx_id)
+        deleted._status.deleted()
         return deleted

     def accept_vpc_peering_connection(self, vpc_pcx_id):
         vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
+        # cross-region connections must be accepted from the accepter's region
+        pcx_req_region = vpc_pcx.vpc.ec2_backend.region_name
+        pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name
+        if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region:
+            raise OperationNotPermitted2(self.region_name, vpc_pcx.id, pcx_acp_region)
         if vpc_pcx._status.code != 'pending-acceptance':
             raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
         vpc_pcx._status.accept()
@@ -2166,6 +2482,11 @@
     def reject_vpc_peering_connection(self, vpc_pcx_id):
         vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
+        # cross-region connections must be rejected from the accepter's region
+        pcx_req_region = 
vpc_pcx.vpc.ec2_backend.region_name + pcx_acp_region = vpc_pcx.peer_vpc.ec2_backend.region_name + if pcx_req_region != pcx_acp_region and self.region_name == pcx_req_region: + raise OperationNotPermitted3(self.region_name, vpc_pcx.id, pcx_acp_region) if vpc_pcx._status.code != 'pending-acceptance': raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id) vpc_pcx._status.reject() @@ -2174,15 +2495,18 @@ class VPCPeeringConnectionBackend(object): class Subnet(TaggedEC2Resource): def __init__(self, ec2_backend, subnet_id, vpc_id, cidr_block, availability_zone, default_for_az, - map_public_ip_on_launch): + map_public_ip_on_launch, owner_id=111122223333, assign_ipv6_address_on_creation=False): self.ec2_backend = ec2_backend self.id = subnet_id self.vpc_id = vpc_id self.cidr_block = cidr_block - self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block)) + self.cidr = ipaddress.IPv4Network(six.text_type(self.cidr_block), strict=False) self._availability_zone = availability_zone self.default_for_az = default_for_az self.map_public_ip_on_launch = map_public_ip_on_launch + self.owner_id = owner_id + self.assign_ipv6_address_on_creation = assign_ipv6_address_on_creation + self.ipv6_cidr_block_associations = [] # Theory is we assign ip's as we go (as 16,777,214 usable IPs in a /8) self._subnet_ip_generator = self.cidr.hosts() @@ -2212,7 +2536,7 @@ class Subnet(TaggedEC2Resource): @property def availability_zone(self): - return self._availability_zone + return self._availability_zone.name @property def physical_resource_id(self): @@ -2309,19 +2633,38 @@ class SubnetBackend(object): return subnets[subnet_id] raise InvalidSubnetIdError(subnet_id) - def create_subnet(self, vpc_id, cidr_block, availability_zone): + def create_subnet(self, vpc_id, cidr_block, availability_zone, context=None): subnet_id = random_subnet_id() - self.get_vpc(vpc_id) # Validate VPC exists + vpc = self.get_vpc(vpc_id) # Validate VPC exists and the supplied CIDR block is a subnet of the VPC's + vpc_cidr_block = ipaddress.IPv4Network(six.text_type(vpc.cidr_block), strict=False) + try: + subnet_cidr_block = ipaddress.IPv4Network(six.text_type(cidr_block), strict=False) + except ValueError: + raise InvalidCIDRBlockParameterError(cidr_block) + if not (vpc_cidr_block.network_address <= subnet_cidr_block.network_address and + vpc_cidr_block.broadcast_address >= subnet_cidr_block.broadcast_address): + raise InvalidSubnetRangeError(cidr_block) + + for subnet in self.get_all_subnets(filters={'vpc-id': vpc_id}): + if subnet.cidr.overlaps(subnet_cidr_block): + raise InvalidSubnetConflictError(cidr_block) # if this is the first subnet for an availability zone, # consider it the default default_for_az = str(availability_zone not in self.subnets).lower() map_public_ip_on_launch = default_for_az - subnet = Subnet(self, subnet_id, vpc_id, cidr_block, availability_zone, - default_for_az, map_public_ip_on_launch) + if availability_zone is None: + availability_zone = 'us-east-1a' + try: + availability_zone_data = next(zone for zones in RegionsAndZonesBackend.zones.values() for zone in zones if zone.name == availability_zone) + except StopIteration: + raise InvalidAvailabilityZoneError(availability_zone, ", ".join([zone.name for zones in RegionsAndZonesBackend.zones.values() for zone in zones])) + subnet = Subnet(self, subnet_id, vpc_id, cidr_block, availability_zone_data, + default_for_az, map_public_ip_on_launch, + owner_id=context.get_current_user() if context else '111122223333', assign_ipv6_address_on_creation=False) # AWS 
associates a new subnet with the default Network ACL - self.associate_default_network_acl_with_subnet(subnet_id) + self.associate_default_network_acl_with_subnet(subnet_id, vpc_id) self.subnets[availability_zone][subnet_id] = subnet return subnet @@ -2346,11 +2689,12 @@ class SubnetBackend(object): return subnets.pop(subnet_id, None) raise InvalidSubnetIdError(subnet_id) - def modify_subnet_attribute(self, subnet_id, map_public_ip): + def modify_subnet_attribute(self, subnet_id, attr_name, attr_value): subnet = self.get_subnet(subnet_id) - if map_public_ip not in ('true', 'false'): - raise InvalidParameterValueError(map_public_ip) - subnet.map_public_ip_on_launch = map_public_ip + if attr_name in ('map_public_ip_on_launch', 'assign_ipv6_address_on_creation'): + setattr(subnet, attr_name, attr_value) + else: + raise InvalidParameterValueError(attr_name) class SubnetRouteTableAssociation(object): @@ -2542,7 +2886,7 @@ class Route(object): ec2_backend = ec2_backends[region_name] route_table = ec2_backend.create_route( route_table_id=route_table_id, - destination_cidr_block=properties['DestinationCidrBlock'], + destination_cidr_block=properties.get('DestinationCidrBlock'), gateway_id=gateway_id, instance_id=instance_id, interface_id=interface_id, @@ -2571,6 +2915,11 @@ class RouteBackend(object): elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id: gateway = self.get_internet_gateway(gateway_id) + try: + ipaddress.IPv4Network(six.text_type(destination_cidr_block), strict=False) + except ValueError: + raise InvalidDestinationCIDRBlockParameterError(destination_cidr_block) + route = Route(route_table, destination_cidr_block, local=local, gateway=gateway, instance=self.get_instance( @@ -2736,7 +3085,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource): def __init__(self, ec2_backend, spot_request_id, price, image_id, type, valid_from, valid_until, launch_group, availability_zone_group, key_name, security_groups, user_data, instance_type, placement, - kernel_id, ramdisk_id, monitoring_enabled, subnet_id, spot_fleet_id, + kernel_id, ramdisk_id, monitoring_enabled, subnet_id, tags, spot_fleet_id, **kwargs): super(SpotInstanceRequest, self).__init__(**kwargs) ls = LaunchSpecification() @@ -2760,6 +3109,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource): ls.monitored = monitoring_enabled ls.subnet_id = subnet_id self.spot_fleet_id = spot_fleet_id + self.tags = tags if security_groups: for group_name in security_groups: @@ -2793,6 +3143,7 @@ class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource): security_group_names=[], security_group_ids=self.launch_specification.groups, spot_fleet_id=self.spot_fleet_id, + tags=self.tags, ) instance = reservation.instances[0] return instance @@ -2808,15 +3159,16 @@ class SpotRequestBackend(object): valid_until, launch_group, availability_zone_group, key_name, security_groups, user_data, instance_type, placement, kernel_id, ramdisk_id, - monitoring_enabled, subnet_id, spot_fleet_id=None): + monitoring_enabled, subnet_id, tags=None, spot_fleet_id=None): requests = [] + tags = tags or {} for _ in range(count): spot_request_id = random_spot_request_id() request = SpotInstanceRequest(self, spot_request_id, price, image_id, type, valid_from, valid_until, launch_group, availability_zone_group, key_name, security_groups, user_data, instance_type, placement, kernel_id, ramdisk_id, - monitoring_enabled, subnet_id, spot_fleet_id) + monitoring_enabled, subnet_id, tags, spot_fleet_id) self.spot_instance_requests[spot_request_id] = 
request requests.append(request) return requests @@ -2836,8 +3188,8 @@ class SpotRequestBackend(object): class SpotFleetLaunchSpec(object): def __init__(self, ebs_optimized, group_set, iam_instance_profile, image_id, - instance_type, key_name, monitoring, spot_price, subnet_id, user_data, - weighted_capacity): + instance_type, key_name, monitoring, spot_price, subnet_id, tag_specifications, + user_data, weighted_capacity): self.ebs_optimized = ebs_optimized self.group_set = group_set self.iam_instance_profile = iam_instance_profile @@ -2847,6 +3199,7 @@ class SpotFleetLaunchSpec(object): self.monitoring = monitoring self.spot_price = spot_price self.subnet_id = subnet_id + self.tag_specifications = tag_specifications self.user_data = user_data self.weighted_capacity = float(weighted_capacity) @@ -2877,6 +3230,7 @@ class SpotFleetRequest(TaggedEC2Resource): monitoring=spec.get('monitoring._enabled'), spot_price=spec.get('spot_price', self.spot_price), subnet_id=spec['subnet_id'], + tag_specifications=self._parse_tag_specifications(spec), user_data=spec.get('user_data'), weighted_capacity=spec['weighted_capacity'], ) @@ -2895,7 +3249,7 @@ class SpotFleetRequest(TaggedEC2Resource): 'Properties']['SpotFleetRequestConfigData'] ec2_backend = ec2_backends[region_name] - spot_price = properties['SpotPrice'] + spot_price = properties.get('SpotPrice') target_capacity = properties['TargetCapacity'] iam_fleet_role = properties['IamFleetRole'] allocation_strategy = properties['AllocationStrategy'] @@ -2929,7 +3283,8 @@ class SpotFleetRequest(TaggedEC2Resource): launch_spec_index += 1 else: # lowestPrice cheapest_spec = sorted( - self.launch_specs, key=lambda spec: float(spec.spot_price))[0] + # FIXME: change `+inf` to the on demand price scaled to weighted capacity when it's not present + self.launch_specs, key=lambda spec: float(spec.spot_price or '+inf'))[0] weight_so_far = weight_to_add + (weight_to_add % cheapest_spec.weighted_capacity) weight_map[cheapest_spec] = int( weight_so_far // cheapest_spec.weighted_capacity) @@ -2958,6 +3313,7 @@ class SpotFleetRequest(TaggedEC2Resource): monitoring_enabled=launch_spec.monitoring, subnet_id=launch_spec.subnet_id, spot_fleet_id=self.id, + tags=launch_spec.tag_specifications, ) self.spot_requests.extend(requests) self.fulfilled_capacity += added_weight @@ -2980,6 +3336,25 @@ class SpotFleetRequest(TaggedEC2Resource): self.spot_requests = [req for req in self.spot_requests if req.instance.id not in instance_ids] self.ec2_backend.terminate_instances(instance_ids) + def _parse_tag_specifications(self, spec): + try: + tag_spec_num = max([int(key.split('.')[1]) for key in spec if key.startswith("tag_specification_set")]) + except ValueError: # no tag specifications + return {} + + tag_specifications = {} + for si in range(1, tag_spec_num + 1): + resource_type = spec["tag_specification_set.{si}._resource_type".format(si=si)] + + tags = [key for key in spec if key.startswith("tag_specification_set.{si}._tag".format(si=si))] + tag_num = max([int(key.split('.')[3]) for key in tags]) + tag_specifications[resource_type] = dict(( + spec["tag_specification_set.{si}._tag.{ti}._key".format(si=si, ti=ti)], + spec["tag_specification_set.{si}._tag.{ti}._value".format(si=si, ti=ti)], + ) for ti in range(1, tag_num + 1)) + + return tag_specifications + class SpotFleetBackend(object): def __init__(self): @@ -3036,8 +3411,11 @@ class SpotFleetBackend(object): class ElasticAddress(object): - def __init__(self, domain): - self.public_ip = random_ip() + def __init__(self, domain, 
address=None): + if address: + self.public_ip = address + else: + self.public_ip = random_ip() self.allocation_id = random_eip_allocation_id() if domain == "vpc" else None self.domain = domain self.instance = None @@ -3099,11 +3477,13 @@ class ElasticAddressBackend(object): self.addresses = [] super(ElasticAddressBackend, self).__init__() - def allocate_address(self, domain): + def allocate_address(self, domain, address=None): if domain not in ['standard', 'vpc']: raise InvalidDomainError(domain) - - address = ElasticAddress(domain) + if address: + address = ElasticAddress(domain, address) + else: + address = ElasticAddress(domain) self.addresses.append(address) return address @@ -3411,8 +3791,22 @@ class NetworkAclBackend(object): self.get_vpc(vpc_id) network_acl = NetworkAcl(self, network_acl_id, vpc_id, default) self.network_acls[network_acl_id] = network_acl + if default: + self.add_default_entries(network_acl_id) return network_acl + def add_default_entries(self, network_acl_id): + default_acl_entries = [ + {'rule_number': "100", 'rule_action': 'allow', 'egress': 'true'}, + {'rule_number': "32767", 'rule_action': 'deny', 'egress': 'true'}, + {'rule_number': "100", 'rule_action': 'allow', 'egress': 'false'}, + {'rule_number': "32767", 'rule_action': 'deny', 'egress': 'false'} + ] + for entry in default_acl_entries: + self.create_network_acl_entry(network_acl_id=network_acl_id, rule_number=entry['rule_number'], protocol='-1', + rule_action=entry['rule_action'], egress=entry['egress'], cidr_block='0.0.0.0/0', + icmp_code=None, icmp_type=None, port_range_from=None, port_range_to=None) + def get_all_network_acls(self, network_acl_ids=None, filters=None): network_acls = self.network_acls.values() @@ -3437,12 +3831,14 @@ class NetworkAclBackend(object): icmp_code, icmp_type, port_range_from, port_range_to): + network_acl = self.get_network_acl(network_acl_id) + if any(entry.egress == egress and entry.rule_number == rule_number for entry in network_acl.network_acl_entries): + raise NetworkAclEntryAlreadyExistsError(rule_number) network_acl_entry = NetworkAclEntry(self, network_acl_id, rule_number, protocol, rule_action, egress, cidr_block, icmp_code, icmp_type, port_range_from, port_range_to) - network_acl = self.get_network_acl(network_acl_id) network_acl.network_acl_entries.append(network_acl_entry) return network_acl_entry @@ -3487,9 +3883,9 @@ class NetworkAclBackend(object): new_acl.associations[new_assoc_id] = association return association - def associate_default_network_acl_with_subnet(self, subnet_id): + def associate_default_network_acl_with_subnet(self, subnet_id, vpc_id): association_id = random_network_acl_subnet_association_id() - acl = next(acl for acl in self.network_acls.values() if acl.default) + acl = next(acl for acl in self.network_acls.values() if acl.default and acl.vpc_id == vpc_id) acl.associations[association_id] = NetworkAclAssociation(self, association_id, subnet_id, acl.id) @@ -3705,6 +4101,7 @@ class NatGateway(object): class NatGatewayBackend(object): def __init__(self): self.nat_gateways = {} + super(NatGatewayBackend, self).__init__() def get_all_nat_gateways(self, filters): return self.nat_gateways.values() diff --git a/moto/ec2/resources/amis.json b/moto/ec2/resources/amis.json index 5cc3122f3..6e4794e22 100644 --- a/moto/ec2/resources/amis.json +++ b/moto/ec2/resources/amis.json @@ -4,6 +4,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", 
"root_device_name": "/dev/sda1", @@ -20,6 +21,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -36,6 +38,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -52,6 +55,7 @@ "state": "available", "public": true, "owner_id": "099720109477", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -68,6 +72,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -84,6 +89,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -100,6 +106,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -116,6 +123,7 @@ "state": "available", "public": true, "owner_id": "013907871322", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -132,6 +140,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -148,6 +157,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -163,6 +173,7 @@ "ami_id": "ami-56ec3e2f", "state": "available", "public": true, + "image_location": "amazon/getting-started", "owner_id": "801119661308", "sriov": "simple", "root_device_type": "ebs", @@ -180,6 +191,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -196,6 +208,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -212,6 +225,7 @@ "state": "available", "public": true, "owner_id": "137112412989", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/xvda", @@ -228,6 +242,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -244,6 +259,7 @@ "state": "available", "public": true, "owner_id": "099720109477", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -260,6 +276,7 @@ "state": "available", "public": true, "owner_id": "137112412989", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -276,6 +293,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", 
"root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -292,6 +310,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -308,6 +327,7 @@ "state": "available", "public": true, "owner_id": "898082745236", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/xvda", @@ -324,6 +344,7 @@ "state": "available", "public": true, "owner_id": "898082745236", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -340,6 +361,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -356,6 +378,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -372,6 +395,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -388,6 +412,7 @@ "state": "available", "public": true, "owner_id": "309956199498", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -404,6 +429,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -420,6 +446,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -436,6 +463,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -452,6 +480,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -468,6 +497,7 @@ "state": "available", "public": true, "owner_id": "898082745236", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -484,6 +514,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -500,6 +531,7 @@ "state": "available", "public": true, "owner_id": "801119661308", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda1", @@ -516,6 +548,7 @@ "state": "available", "public": true, "owner_id": "898082745236", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/xvda", @@ -532,6 +565,7 @@ "state": "available", "public": true, "owner_id": "013907871322", + "image_location": "amazon/getting-started", "sriov": "simple", "root_device_type": "ebs", "root_device_name": "/dev/sda", diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index 19e6d31a1..17e1e228d 100755 --- 
a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -11,7 +11,7 @@ class AmisResponse(BaseResponse): instance_id = self._get_param('InstanceId') if self.is_not_dryrun('CreateImage'): image = self.ec2_backend.create_image( - instance_id, name, description) + instance_id, name, description, context=self) template = self.response_template(CREATE_IMAGE_RESPONSE) return template.render(image=image) @@ -39,7 +39,8 @@ class AmisResponse(BaseResponse): owners = self._get_multi_param('Owner') exec_users = self._get_multi_param('ExecutableBy') images = self.ec2_backend.describe_images( - ami_ids=ami_ids, filters=filters, exec_users=exec_users, owners=owners) + ami_ids=ami_ids, filters=filters, exec_users=exec_users, + owners=owners, context=self) template = self.response_template(DESCRIBE_IMAGES_RESPONSE) return template.render(images=images) @@ -112,12 +113,12 @@ DESCRIBE_IMAGES_RESPONSE = """ + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + {{ snapshot.id }} +""" + DESCRIBE_SNAPSHOTS_RESPONSE = """ 59dbff89-35bd-4eac-99ed-be587EXAMPLE @@ -232,7 +272,7 @@ DESCRIBE_SNAPSHOTS_RESPONSE = """{{ eni.subnet.id }} {{ eni.subnet.vpc_id }} us-west-2a + {% if eni.description %} + {{ eni.description }} + {% else %} + {% endif %} 498654062920 false pending @@ -121,7 +126,7 @@ DESCRIBE_NETWORK_INTERFACES_RESPONSE = """{{ eni.subnet.id }} {{ eni.subnet.vpc_id }} us-west-2a - Primary network interface + {{ eni.description }} 190610284047 false {% if eni.attachment_id %} diff --git a/moto/ec2/responses/instances.py b/moto/ec2/responses/instances.py index cae9631ea..82c2b1997 100644 --- a/moto/ec2/responses/instances.py +++ b/moto/ec2/responses/instances.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals from boto.ec2.instancetype import InstanceType + +from moto.autoscaling import autoscaling_backends from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from moto.ec2.utils import filters_from_querystring, \ @@ -45,6 +47,8 @@ class InstanceResponse(BaseResponse): private_ip = self._get_param('PrivateIpAddress') associate_public_ip = self._get_param('AssociatePublicIpAddress') key_name = self._get_param('KeyName') + ebs_optimized = self._get_param('EbsOptimized') + instance_initiated_shutdown_behavior = self._get_param("InstanceInitiatedShutdownBehavior") tags = self._parse_tag_specification("TagSpecification") region_name = self.region @@ -54,7 +58,7 @@ class InstanceResponse(BaseResponse): instance_type=instance_type, placement=placement, region_name=region_name, subnet_id=subnet_id, owner_id=owner_id, key_name=key_name, security_group_ids=security_group_ids, nics=nics, private_ip=private_ip, associate_public_ip=associate_public_ip, - tags=tags) + tags=tags, ebs_optimized=ebs_optimized, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior) template = self.response_template(EC2_RUN_INSTANCES) return template.render(reservation=new_reservation) @@ -63,6 +67,7 @@ class InstanceResponse(BaseResponse): instance_ids = self._get_multi_param('InstanceId') if self.is_not_dryrun('TerminateInstance'): instances = self.ec2_backend.terminate_instances(instance_ids) + autoscaling_backends[self.region].notify_terminate_instances(instance_ids) template = self.response_template(EC2_TERMINATE_INSTANCES) return template.render(instances=instances) @@ -112,12 +117,11 @@ class InstanceResponse(BaseResponse): # TODO this and modify below should raise IncorrectInstanceState if # instance not in stopped state attribute = self._get_param('Attribute') - key = 
camelcase_to_underscores(attribute) instance_id = self._get_param('InstanceId') instance, value = self.ec2_backend.describe_instance_attribute( - instance_id, key) + instance_id, attribute) - if key == "group_set": + if attribute == "groupSet": template = self.response_template( EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE) else: @@ -242,7 +246,8 @@ EC2_RUN_INSTANCES = """59dbff89-35bd-4eac-99ed-be587EXAMPLE {{ instance.id }} <{{ attribute }}> - {% for sg_id in value %} + {% for sg in value %} - {{ sg_id }} + {{ sg.id }} {% endfor %} diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index 9118c01b3..4aecfcf78 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -11,6 +11,29 @@ def try_parse_int(value, default=None): return default +def parse_sg_attributes_from_dict(sg_attributes): + ip_protocol = sg_attributes.get('IpProtocol', [None])[0] + from_port = sg_attributes.get('FromPort', [None])[0] + to_port = sg_attributes.get('ToPort', [None])[0] + + ip_ranges = [] + ip_ranges_tree = sg_attributes.get('IpRanges') or {} + for ip_range_idx in sorted(ip_ranges_tree.keys()): + ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0]) + + source_groups = [] + source_group_ids = [] + groups_tree = sg_attributes.get('Groups') or {} + for group_idx in sorted(groups_tree.keys()): + group_dict = groups_tree[group_idx] + if 'GroupId' in group_dict: + source_group_ids.append(group_dict['GroupId'][0]) + elif 'GroupName' in group_dict: + source_groups.append(group_dict['GroupName'][0]) + + return ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids + + class SecurityGroups(BaseResponse): def _process_rules_from_querystring(self): @@ -29,28 +52,17 @@ class SecurityGroups(BaseResponse): d = d[subkey] d[key_splitted[-1]] = value + if 'IpPermissions' not in querytree: + # Handle single rule syntax + ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(querytree) + yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges, + source_groups, source_group_ids) + ip_permissions = querytree.get('IpPermissions') or {} for ip_permission_idx in sorted(ip_permissions.keys()): ip_permission = ip_permissions[ip_permission_idx] - ip_protocol = ip_permission.get('IpProtocol', [None])[0] - from_port = ip_permission.get('FromPort', [None])[0] - to_port = ip_permission.get('ToPort', [None])[0] - - ip_ranges = [] - ip_ranges_tree = ip_permission.get('IpRanges') or {} - for ip_range_idx in sorted(ip_ranges_tree.keys()): - ip_ranges.append(ip_ranges_tree[ip_range_idx]['CidrIp'][0]) - - source_groups = [] - source_group_ids = [] - groups_tree = ip_permission.get('Groups') or {} - for group_idx in sorted(groups_tree.keys()): - group_dict = groups_tree[group_idx] - if 'GroupId' in group_dict: - source_group_ids.append(group_dict['GroupId'][0]) - elif 'GroupName' in group_dict: - source_groups.append(group_dict['GroupName'][0]) + ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids = parse_sg_attributes_from_dict(ip_permission) yield (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids) @@ -179,8 +191,12 @@ DESCRIBE_SECURITY_GROUPS_RESPONSE = """{{ subnet.vpc_id }} {{ subnet.cidr_block }} 251 - {{ subnet.availability_zone }} - - {% for tag in subnet.get_tags() %} - - {{ tag.resource_id }} - {{ tag.resource_type }} - {{ tag.key }} - {{ tag.value }} - - {% endfor %} - + {{ subnet._availability_zone.name 
}} + {{ subnet._availability_zone.zone_id }} + {{ subnet.default_for_az }} + {{ subnet.map_public_ip_on_launch }} + {{ subnet.owner_id }} + {{ subnet.assign_ipv6_address_on_creation }} + {{ subnet.ipv6_cidr_block_associations }} + arn:aws:ec2:{{ subnet._availability_zone.name[0:-1] }}:{{ subnet.owner_id }}:subnet/{{ subnet.id }} """ @@ -80,19 +84,26 @@ DESCRIBE_SUBNETS_RESPONSE = """ {{ subnet.vpc_id }} {{ subnet.cidr_block }} 251 - {{ subnet.availability_zone }} + {{ subnet._availability_zone.name }} + {{ subnet._availability_zone.zone_id }} {{ subnet.default_for_az }} {{ subnet.map_public_ip_on_launch }} - - {% for tag in subnet.get_tags() %} - - {{ tag.resource_id }} - {{ tag.resource_type }} - {{ tag.key }} - {{ tag.value }} - - {% endfor %} - + {{ subnet.owner_id }} + {{ subnet.assign_ipv6_address_on_creation }} + {{ subnet.ipv6_cidr_block_associations }} + arn:aws:ec2:{{ subnet._availability_zone.name[0:-1] }}:{{ subnet.owner_id }}:subnet/{{ subnet.id }} + {% if subnet.get_tags() %} + + {% for tag in subnet.get_tags() %} + + {{ tag.resource_id }} + {{ tag.resource_type }} + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + + {% endif %} {% endfor %} diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py index 1bccce4f6..68bae72da 100644 --- a/moto/ec2/responses/vpc_peering_connections.py +++ b/moto/ec2/responses/vpc_peering_connections.py @@ -5,8 +5,12 @@ from moto.core.responses import BaseResponse class VPCPeeringConnections(BaseResponse): def create_vpc_peering_connection(self): + peer_region = self._get_param('PeerRegion') + if peer_region == self.region or peer_region is None: + peer_vpc = self.ec2_backend.get_vpc(self._get_param('PeerVpcId')) + else: + peer_vpc = self.ec2_backend.get_cross_vpc(self._get_param('PeerVpcId'), peer_region) vpc = self.ec2_backend.get_vpc(self._get_param('VpcId')) - peer_vpc = self.ec2_backend.get_vpc(self._get_param('PeerVpcId')) vpc_pcx = self.ec2_backend.create_vpc_peering_connection(vpc, peer_vpc) template = self.response_template( CREATE_VPC_PEERING_CONNECTION_RESPONSE) @@ -41,54 +45,64 @@ class VPCPeeringConnections(BaseResponse): CREATE_VPC_PEERING_CONNECTION_RESPONSE = """ - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - {{ vpc_pcx.id }} + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + {{ vpc_pcx.id }} - 777788889999 - {{ vpc_pcx.vpc.id }} - {{ vpc_pcx.vpc.cidr_block }} + 777788889999 + {{ vpc_pcx.vpc.id }} + {{ vpc_pcx.vpc.cidr_block }} + + false + false + false + 123456789012 {{ vpc_pcx.peer_vpc.id }} - initiating-request - Initiating request to {accepter ID}. 
+ initiating-request + Initiating Request to {accepter ID} 2014-02-18T14:37:25.000Z - + """ DESCRIBE_VPC_PEERING_CONNECTIONS_RESPONSE = """ - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - {% for vpc_pcx in vpc_pcxs %} - - {{ vpc_pcx.id }} - - 777788889999 - {{ vpc_pcx.vpc.id }} - {{ vpc_pcx.vpc.cidr_block }} - - - 123456789012 - {{ vpc_pcx.peer_vpc.id }} - - - {{ vpc_pcx._status.code }} - {{ vpc_pcx._status.message }} - - 2014-02-17T16:00:50.000Z - - - {% endfor %} - + +7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + {% for vpc_pcx in vpc_pcxs %} + + {{ vpc_pcx.id }} + + 777788889999 + {{ vpc_pcx.vpc.id }} + {{ vpc_pcx.vpc.cidr_block }} + + + 123456789012 + {{ vpc_pcx.peer_vpc.id }} + {{ vpc_pcx.peer_vpc.cidr_block }} + + false + true + false + + + + {{ vpc_pcx._status.code }} + {{ vpc_pcx._status.message }} + + + + {% endfor %} + """ @@ -100,19 +114,24 @@ DELETE_VPC_PEERING_CONNECTION_RESPONSE = """ """ ACCEPT_VPC_PEERING_CONNECTION_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE {{ vpc_pcx.id }} - 123456789012 + 777788889999 {{ vpc_pcx.vpc.id }} {{ vpc_pcx.vpc.cidr_block }} - 777788889999 + 123456789012 {{ vpc_pcx.peer_vpc.id }} {{ vpc_pcx.peer_vpc.cidr_block }} + + false + false + false + {{ vpc_pcx._status.code }} diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py index 8a53151e0..88673d863 100644 --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -9,9 +9,12 @@ class VPCs(BaseResponse): def create_vpc(self): cidr_block = self._get_param('CidrBlock') instance_tenancy = self._get_param('InstanceTenancy', if_none='default') - vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy) + amazon_provided_ipv6_cidr_blocks = self._get_param('AmazonProvidedIpv6CidrBlock') + vpc = self.ec2_backend.create_vpc(cidr_block, instance_tenancy, + amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_blocks) + doc_date = '2013-10-15' if 'Boto/' in self.headers.get('user-agent', '') else '2016-11-15' template = self.response_template(CREATE_VPC_RESPONSE) - return template.render(vpc=vpc) + return template.render(vpc=vpc, doc_date=doc_date) def delete_vpc(self): vpc_id = self._get_param('VpcId') @@ -23,8 +26,9 @@ class VPCs(BaseResponse): vpc_ids = self._get_multi_param('VpcId') filters = filters_from_querystring(self.querystring) vpcs = self.ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters) + doc_date = '2013-10-15' if 'Boto/' in self.headers.get('user-agent', '') else '2016-11-15' template = self.response_template(DESCRIBE_VPCS_RESPONSE) - return template.render(vpcs=vpcs) + return template.render(vpcs=vpcs, doc_date=doc_date) def describe_vpc_attribute(self): vpc_id = self._get_param('VpcId') @@ -45,14 +49,63 @@ class VPCs(BaseResponse): vpc_id, attr_name, attr_value) return MODIFY_VPC_ATTRIBUTE_RESPONSE + def associate_vpc_cidr_block(self): + vpc_id = self._get_param('VpcId') + amazon_provided_ipv6_cidr_blocks = self._get_param('AmazonProvidedIpv6CidrBlock') + # todo test on AWS if can create an association for IPV4 and IPV6 in the same call? 
+ cidr_block = self._get_param('CidrBlock') if not amazon_provided_ipv6_cidr_blocks else None + value = self.ec2_backend.associate_vpc_cidr_block(vpc_id, cidr_block, amazon_provided_ipv6_cidr_blocks) + if not amazon_provided_ipv6_cidr_blocks: + render_template = ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE + else: + render_template = IPV6_ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE + template = self.response_template(render_template) + return template.render(vpc_id=vpc_id, value=value, cidr_block=value['cidr_block'], + association_id=value['association_id'], cidr_block_state='associating') + + def disassociate_vpc_cidr_block(self): + association_id = self._get_param('AssociationId') + value = self.ec2_backend.disassociate_vpc_cidr_block(association_id) + if "::" in value.get('cidr_block', ''): + render_template = IPV6_DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE + else: + render_template = DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE + template = self.response_template(render_template) + return template.render(vpc_id=value['vpc_id'], cidr_block=value['cidr_block'], + association_id=value['association_id'], cidr_block_state='disassociating') + CREATE_VPC_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE {{ vpc.id }} pending {{ vpc.cidr_block }} + {% if doc_date == "2016-11-15" %} + + {% for assoc in vpc.get_cidr_block_association_set() %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + + {% for assoc in vpc.get_cidr_block_association_set(ipv6=True) %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + {% endif %} {% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-1a2b3c4d2{% endif %} {{ vpc.instance_tenancy }} @@ -69,14 +122,38 @@ CREATE_VPC_RESPONSE = """ """ DESCRIBE_VPCS_RESPONSE = """ - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + 7a62c442-3484-4f42-9342-6942EXAMPLE {% for vpc in vpcs %} {{ vpc.id }} {{ vpc.state }} {{ vpc.cidr_block }} + {% if doc_date == "2016-11-15" %} + + {% for assoc in vpc.get_cidr_block_association_set() %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + + {% for assoc in vpc.get_cidr_block_association_set(ipv6=True) %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + {% endif %} {% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-7a8b9c2d{% endif %} {{ vpc.instance_tenancy }} {{ vpc.is_default }} @@ -96,14 +173,14 @@ DESCRIBE_VPCS_RESPONSE = """ """ DELETE_VPC_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE true """ DESCRIBE_VPC_ATTRIBUTE_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE {{ vpc_id }} <{{ attribute }}> @@ -112,7 +189,59 @@ DESCRIBE_VPC_ATTRIBUTE_RESPONSE = """ """ MODIFY_VPC_ATTRIBUTE_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE true """ + +ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" + +DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" + +IPV6_ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 33af6c54-1139-4d50-b4f7-15a8example + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" + +IPV6_DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 33af6c54-1139-4d50-b4f7-15a8example + {{vpc_id}} + + {{association_id}} + 
{{cidr_block}} + + {{cidr_block_state}} + + +""" diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 32122c763..a998f18ef 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -1,10 +1,19 @@ from __future__ import unicode_literals +import base64 +import hashlib import fnmatch import random import re import six +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.asymmetric import rsa +import sshpubkeys.exceptions +from sshpubkeys.keys import SSHKey + + EC2_RESOURCE_TO_PREFIX = { 'customer-gateway': 'cgw', 'dhcp-options': 'dopt', @@ -27,6 +36,7 @@ EC2_RESOURCE_TO_PREFIX = { 'reservation': 'r', 'volume': 'vol', 'vpc': 'vpc', + 'vpc-cidr-association-id': 'vpc-cidr-assoc', 'vpc-elastic-ip': 'eipalloc', 'vpc-elastic-ip-association': 'eipassoc', 'vpc-peering-connection': 'pcx', @@ -34,16 +44,17 @@ EC2_RESOURCE_TO_PREFIX = { 'vpn-gateway': 'vgw'} -EC2_PREFIX_TO_RESOURCE = dict((v, k) - for (k, v) in EC2_RESOURCE_TO_PREFIX.items()) +EC2_PREFIX_TO_RESOURCE = dict((v, k) for (k, v) in EC2_RESOURCE_TO_PREFIX.items()) + + +def random_resource_id(size=8): + chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f'] + resource_id = ''.join(six.text_type(random.choice(chars)) for x in range(size)) + return resource_id def random_id(prefix='', size=8): - chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f'] - - resource_id = ''.join(six.text_type(random.choice(chars)) - for x in range(size)) - return '{0}-{1}'.format(prefix, resource_id) + return '{0}-{1}'.format(prefix, random_resource_id(size)) def random_ami_id(): @@ -110,6 +121,10 @@ def random_vpc_id(): return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc']) +def random_vpc_cidr_association_id(): + return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-cidr-association-id']) + + def random_vpc_peering_connection_id(): return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-peering-connection']) @@ -165,6 +180,10 @@ def random_ip(): ) +def random_ipv6_cidr(): + return "2400:6500:{}:{}::/56".format(random_resource_id(4), random_resource_id(4)) + + def generate_route_id(route_table_id, cidr_block): return "%s~%s" % (route_table_id, cidr_block) @@ -443,23 +462,19 @@ def simple_aws_filter_to_re(filter_string): def random_key_pair(): - def random_hex(): - return chr(random.choice(list(range(48, 58)) + list(range(97, 102)))) + private_key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + backend=default_backend()) + private_key_material = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption()) + public_key_fingerprint = rsa_public_key_fingerprint(private_key.public_key()) - def random_fingerprint(): - return ':'.join([random_hex() + random_hex() for i in range(20)]) - - def random_material(): - return ''.join([ - chr(random.choice(list(range(65, 91)) + list(range(48, 58)) + - list(range(97, 102)))) - for i in range(1000) - ]) - material = "---- BEGIN RSA PRIVATE KEY ----" + random_material() + \ - "-----END RSA PRIVATE KEY-----" return { - 'fingerprint': random_fingerprint(), - 'material': material + 'fingerprint': public_key_fingerprint, + 'material': private_key_material.decode('ascii') } @@ -525,3 +540,28 @@ def generate_instance_identity_document(instance): } return document + + +def rsa_public_key_parse(key_material): + try: + if not isinstance(key_material, six.binary_type): + key_material = 
key_material.encode("ascii") + + decoded_key = base64.b64decode(key_material).decode("ascii") + public_key = SSHKey(decoded_key) + except (sshpubkeys.exceptions.InvalidKeyException, UnicodeDecodeError): + raise ValueError('bad key') + + if not public_key.rsa: + raise ValueError('bad key') + + return public_key.rsa + + +def rsa_public_key_fingerprint(rsa_public_key): + key_data = rsa_public_key.public_bytes( + encoding=serialization.Encoding.DER, + format=serialization.PublicFormat.SubjectPublicKeyInfo) + fingerprint_hex = hashlib.md5(key_data).hexdigest() + fingerprint = re.sub(r'([a-f0-9]{2})(?!$)', r'\1:', fingerprint_hex) + return fingerprint diff --git a/moto/ecr/models.py b/moto/ecr/models.py index f5b6f24e4..b03f25dee 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -1,15 +1,17 @@ from __future__ import unicode_literals -# from datetime import datetime + +import hashlib +import re +from copy import copy +from datetime import datetime from random import random +from botocore.exceptions import ParamValidationError + from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends -from copy import copy -import hashlib - from moto.ecr.exceptions import ImageNotFoundException, RepositoryNotFoundException - DEFAULT_REGISTRY_ID = '012345678910' @@ -97,14 +99,15 @@ class Repository(BaseObject): class Image(BaseObject): - def __init__(self, tag, manifest, repository, registry_id=DEFAULT_REGISTRY_ID): + def __init__(self, tag, manifest, repository, digest=None, registry_id=DEFAULT_REGISTRY_ID): self.image_tag = tag + self.image_tags = [tag] if tag is not None else [] self.image_manifest = manifest self.image_size_in_bytes = 50 * 1024 * 1024 self.repository = repository self.registry_id = registry_id - self.image_digest = None - self.image_pushed_at = None + self.image_digest = digest + self.image_pushed_at = str(datetime.utcnow().isoformat()) def _create_digest(self): image_contents = 'docker_image{0}'.format(int(random() * 10 ** 6)) @@ -115,6 +118,20 @@ class Image(BaseObject): self._create_digest() return self.image_digest + def get_image_manifest(self): + return self.image_manifest + + def remove_tag(self, tag): + if tag is not None and tag in self.image_tags: + self.image_tags.remove(tag) + if self.image_tags: + self.image_tag = self.image_tags[-1] + + def update_tag(self, tag): + self.image_tag = tag + if tag not in self.image_tags and tag is not None: + self.image_tags.append(tag) + @property def response_object(self): response_object = self.gen_response_object() @@ -124,26 +141,44 @@ class Image(BaseObject): response_object['imageManifest'] = self.image_manifest response_object['repositoryName'] = self.repository response_object['registryId'] = self.registry_id - return response_object + return {k: v for k, v in response_object.items() if v is not None and v != [None]} @property def response_list_object(self): response_object = self.gen_response_object() response_object['imageTag'] = self.image_tag response_object['imageDigest'] = "i don't know" - return response_object + return {k: v for k, v in response_object.items() if v is not None and v != [None]} @property def response_describe_object(self): response_object = self.gen_response_object() - response_object['imageTags'] = [self.image_tag] + response_object['imageTags'] = self.image_tags response_object['imageDigest'] = self.get_image_digest() response_object['imageManifest'] = self.image_manifest response_object['repositoryName'] = self.repository response_object['registryId'] = self.registry_id 
response_object['imageSizeInBytes'] = self.image_size_in_bytes - response_object['imagePushedAt'] = '2017-05-09' - return response_object + response_object['imagePushedAt'] = self.image_pushed_at + return {k: v for k, v in response_object.items() if v is not None and v != []} + + @property + def response_batch_get_image(self): + response_object = {} + response_object['imageId'] = {} + response_object['imageId']['imageTag'] = self.image_tag + response_object['imageId']['imageDigest'] = self.get_image_digest() + response_object['imageManifest'] = self.image_manifest + response_object['repositoryName'] = self.repository + response_object['registryId'] = self.registry_id + return {k: v for k, v in response_object.items() if v is not None and v != [None]} + + @property + def response_batch_delete_image(self): + response_object = {} + response_object['imageDigest'] = self.get_image_digest() + response_object['imageTag'] = self.image_tag + return {k: v for k, v in response_object.items() if v is not None and v != [None]} class ECRBackend(BaseBackend): @@ -189,17 +224,22 @@ class ECRBackend(BaseBackend): """ maxResults and filtering not implemented """ - images = [] - for repository in self.repositories.values(): - if repository_name: - if repository.name != repository_name: - continue + repository = None + found = False + if repository_name in self.repositories: + repository = self.repositories[repository_name] if registry_id: - if repository.registry_id != registry_id: - continue + if repository.registry_id == registry_id: + found = True + else: + found = True - for image in repository.images: - images.append(image) + if not found: + raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) + + images = [] + for image in repository.images: + images.append(image) return images def describe_images(self, repository_name, registry_id=None, image_ids=None): @@ -215,7 +255,7 @@ class ECRBackend(BaseBackend): found = False for image in repository.images: if (('imageDigest' in image_id and image.get_image_digest() == image_id['imageDigest']) or - ('imageTag' in image_id and image.image_tag == image_id['imageTag'])): + ('imageTag' in image_id and image_id['imageTag'] in image.image_tags)): found = True response.add(image) if not found: @@ -241,9 +281,149 @@ class ECRBackend(BaseBackend): else: raise Exception("{0} is not a repository".format(repository_name)) - image = Image(image_tag, image_manifest, repository_name) - repository.images.append(image) - return image + existing_images = list(filter(lambda x: x.response_object['imageManifest'] == image_manifest, repository.images)) + if not existing_images: + # this image is not in ECR yet + image = Image(image_tag, image_manifest, repository_name) + repository.images.append(image) + return image + else: + # update existing image + existing_images[0].update_tag(image_tag) + return existing_images[0] + + def batch_get_image(self, repository_name, registry_id=None, image_ids=None, accepted_media_types=None): + if repository_name in self.repositories: + repository = self.repositories[repository_name] + else: + raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) + + if not image_ids: + raise ParamValidationError(msg='Missing required parameter in input: "imageIds"') + + response = { + 'images': [], + 'failures': [], + } + + for image_id in image_ids: + found = False + for image in repository.images: + if (('imageDigest' in image_id and image.get_image_digest() == image_id['imageDigest']) or + 
('imageTag' in image_id and image.image_tag == image_id['imageTag'])): + found = True + response['images'].append(image.response_batch_get_image) + + if not found: + response['failures'].append({ + 'imageId': { + 'imageTag': image_id.get('imageTag', 'null') + }, + 'failureCode': 'ImageNotFound', + 'failureReason': 'Requested image not found' + }) + + return response + + def batch_delete_image(self, repository_name, registry_id=None, image_ids=None): + if repository_name in self.repositories: + repository = self.repositories[repository_name] + else: + raise RepositoryNotFoundException( + repository_name, registry_id or DEFAULT_REGISTRY_ID + ) + + if not image_ids: + raise ParamValidationError( + msg='Missing required parameter in input: "imageIds"' + ) + + response = { + "imageIds": [], + "failures": [] + } + + for image_id in image_ids: + image_found = False + + # Is request missing both digest and tag? + if "imageDigest" not in image_id and "imageTag" not in image_id: + response["failures"].append( + { + "imageId": {}, + "failureCode": "MissingDigestAndTag", + "failureReason": "Invalid request parameters: both tag and digest cannot be null", + } + ) + continue + + # If we have a digest, is it valid? + if "imageDigest" in image_id: + pattern = re.compile("^[0-9a-zA-Z_+\.-]+:[0-9a-fA-F]{64}") + if not pattern.match(image_id.get("imageDigest")): + response["failures"].append( + { + "imageId": { + "imageDigest": image_id.get("imageDigest", "null") + }, + "failureCode": "InvalidImageDigest", + "failureReason": "Invalid request parameters: image digest should satisfy the regex '[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+'", + } + ) + continue + + for num, image in enumerate(repository.images): + + # Search by matching both digest and tag + if "imageDigest" in image_id and "imageTag" in image_id: + if ( + image_id["imageDigest"] == image.get_image_digest() and + image_id["imageTag"] in image.image_tags + ): + image_found = True + for image_tag in reversed(image.image_tags): + repository.images[num].image_tag = image_tag + response["imageIds"].append( + image.response_batch_delete_image + ) + repository.images[num].remove_tag(image_tag) + del repository.images[num] + + # Search by matching digest + elif "imageDigest" in image_id and image.get_image_digest() == image_id["imageDigest"]: + image_found = True + for image_tag in reversed(image.image_tags): + repository.images[num].image_tag = image_tag + response["imageIds"].append(image.response_batch_delete_image) + repository.images[num].remove_tag(image_tag) + del repository.images[num] + + # Search by matching tag + elif "imageTag" in image_id and image_id["imageTag"] in image.image_tags: + image_found = True + repository.images[num].image_tag = image_id["imageTag"] + response["imageIds"].append(image.response_batch_delete_image) + if len(image.image_tags) > 1: + repository.images[num].remove_tag(image_id["imageTag"]) + else: + repository.images.remove(image) + + if not image_found: + failure_response = { + "imageId": {}, + "failureCode": "ImageNotFound", + "failureReason": "Requested image not found", + } + + if "imageDigest" in image_id: + failure_response["imageId"]["imageDigest"] = image_id.get("imageDigest", "null") + + if "imageTag" in image_id: + failure_response["imageId"]["imageTag"] = image_id.get("imageTag", "null") + + response["failures"].append(failure_response) + + return response ecr_backends = {} diff --git a/moto/ecr/responses.py b/moto/ecr/responses.py index 6207de4eb..f758176ad 100644 --- a/moto/ecr/responses.py +++ b/moto/ecr/responses.py 
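
Taken together, the model changes above give `put_image` upsert semantics (re-pushing an existing manifest appends a tag rather than creating a duplicate image) and let `batch_delete_image` delete or untag by tag or digest. A hedged usage sketch against the mock; the repository name and manifest body are placeholders:

    import json
    import boto3
    from moto import mock_ecr

    @mock_ecr
    def demo_multi_tag():
        client = boto3.client('ecr', region_name='us-east-1')
        client.create_repository(repositoryName='test-repo')
        manifest = json.dumps({'schemaVersion': 2})  # placeholder manifest
        client.put_image(repositoryName='test-repo',
                         imageManifest=manifest, imageTag='v1')
        client.put_image(repositoryName='test-repo',
                         imageManifest=manifest, imageTag='latest')
        images = client.describe_images(repositoryName='test-repo')['imageDetails']
        assert len(images) == 1                                 # one image...
        assert set(images[0]['imageTags']) == {'v1', 'latest'}  # ...two tags
        # Deleting by tag removes only that tag while others remain:
        client.batch_delete_image(repositoryName='test-repo',
                                  imageIds=[{'imageTag': 'v1'}])
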
@@ -5,7 +5,7 @@ from datetime import datetime import time from moto.core.responses import BaseResponse -from .models import ecr_backends +from .models import ecr_backends, DEFAULT_REGISTRY_ID class ECRResponse(BaseResponse): @@ -84,14 +84,21 @@ class ECRResponse(BaseResponse): 'ECR.batch_check_layer_availability is not yet implemented') def batch_delete_image(self): - if self.is_not_dryrun('BatchDeleteImage'): - raise NotImplementedError( - 'ECR.batch_delete_image is not yet implemented') + repository_str = self._get_param('repositoryName') + registry_id = self._get_param('registryId') + image_ids = self._get_param('imageIds') + + response = self.ecr_backend.batch_delete_image(repository_str, registry_id, image_ids) + return json.dumps(response) def batch_get_image(self): - if self.is_not_dryrun('BatchGetImage'): - raise NotImplementedError( - 'ECR.batch_get_image is not yet implemented') + repository_str = self._get_param('repositoryName') + registry_id = self._get_param('registryId') + image_ids = self._get_param('imageIds') + accepted_media_types = self._get_param('acceptedMediaTypes') + + response = self.ecr_backend.batch_get_image(repository_str, registry_id, image_ids, accepted_media_types) + return json.dumps(response) def can_paginate(self): if self.is_not_dryrun('CanPaginate'): @@ -116,7 +123,7 @@ class ECRResponse(BaseResponse): def get_authorization_token(self): registry_ids = self._get_param('registryIds') if not registry_ids: - registry_ids = [self.region] + registry_ids = [DEFAULT_REGISTRY_ID] auth_data = [] for registry_id in registry_ids: password = '{}-auth-token'.format(registry_id) @@ -124,7 +131,7 @@ class ECRResponse(BaseResponse): auth_data.append({ 'authorizationToken': auth_token, 'expiresAt': time.mktime(datetime(2015, 1, 1).timetuple()), - 'proxyEndpoint': 'https://012345678910.dkr.ecr.{}.amazonaws.com'.format(registry_id) + 'proxyEndpoint': 'https://{}.dkr.ecr.{}.amazonaws.com'.format(registry_id, self.region) }) return json.dumps({'authorizationData': auth_data}) diff --git a/moto/ecr/urls.py b/moto/ecr/urls.py index 86b8a8dbc..5b12cd843 100644 --- a/moto/ecr/urls.py +++ b/moto/ecr/urls.py @@ -3,6 +3,7 @@ from .responses import ECRResponse url_bases = [ "https?://ecr.(.+).amazonaws.com", + "https?://api.ecr.(.+).amazonaws.com", ] url_paths = { diff --git a/moto/ecs/exceptions.py b/moto/ecs/exceptions.py new file mode 100644 index 000000000..bb7e685c8 --- /dev/null +++ b/moto/ecs/exceptions.py @@ -0,0 +1,13 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + + +class ServiceNotFoundException(RESTError): + code = 400 + + def __init__(self, service_name): + super(ServiceNotFoundException, self).__init__( + error_type="ServiceNotFoundException", + message="The service {0} does not exist".format(service_name), + template='error_json', + ) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index b44184033..a314c7776 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import uuid from datetime import datetime from random import random, randint +import boto3 import pytz from moto.core.exceptions import JsonRESTError @@ -9,6 +10,8 @@ from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends from copy import copy +from .exceptions import ServiceNotFoundException + class BaseObject(BaseModel): @@ -23,7 +26,7 @@ class BaseObject(BaseModel): def gen_response_object(self): response_object = copy(self.__dict__) - for key, value in response_object.items(): + 
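
The one-line swap just below, from iterating `response_object` to iterating `self.__dict__`, matters on Python 3: deleting keys from the dict being iterated raises `RuntimeError: dictionary changed size during iteration`, whereas iterating the instance `__dict__` while mutating the copy is safe. A minimal illustration, with `camelize` standing in for `BaseObject.camelCase`:

    d = {'instance_id': 'i-123', 'agent_connected': True}
    resp = dict(d)  # as gen_response_object copies self.__dict__

    def camelize(key):
        words = key.split('_')
        return words[0] + ''.join(w.title() for w in words[1:])

    for key in d:                              # iterate the original...
        resp[camelize(key)] = resp.pop(key)    # ...mutate only the copy
    print(resp)  # {'instanceId': 'i-123', 'agentConnected': True}
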
for key, value in self.__dict__.items(): if '_' in key: response_object[self.camelCase(key)] = value del response_object[key] @@ -60,7 +63,11 @@ class Cluster(BaseObject): @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): - properties = cloudformation_json['Properties'] + # if properties is not provided, cloudformation will use the default values for all properties + if 'Properties' in cloudformation_json: + properties = cloudformation_json['Properties'] + else: + properties = {} ecs_backend = ecs_backends[region_name] return ecs_backend.create_cluster( @@ -87,6 +94,12 @@ class Cluster(BaseObject): # no-op when nothing changed between old and new resources return original_resource + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + if attribute_name == 'Arn': + return self.arn + raise UnformattedGetAttTemplateException() + class TaskDefinition(BaseObject): @@ -108,6 +121,10 @@ class TaskDefinition(BaseObject): del response_object['arn'] return response_object + @property + def physical_resource_id(self): + return self.arn + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -168,7 +185,7 @@ class Task(BaseObject): class Service(BaseObject): - def __init__(self, cluster, service_name, task_definition, desired_count): + def __init__(self, cluster, service_name, task_definition, desired_count, load_balancers=None, scheduling_strategy=None): self.cluster_arn = cluster.arn self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format( service_name) @@ -190,7 +207,8 @@ class Service(BaseObject): 'updatedAt': datetime.now(pytz.utc), } ] - self.load_balancers = [] + self.load_balancers = load_balancers if load_balancers is not None else [] + self.scheduling_strategy = scheduling_strategy if scheduling_strategy is not None else 'REPLICA' self.pending_count = 0 @property @@ -203,6 +221,7 @@ class Service(BaseObject): del response_object['name'], response_object['arn'] response_object['serviceName'] = self.name response_object['serviceArn'] = self.arn + response_object['schedulingStrategy'] = self.scheduling_strategy for deployment in response_object['deployments']: if isinstance(deployment['createdAt'], datetime): @@ -258,10 +277,16 @@ class Service(BaseObject): else: return ecs_backend.update_service(cluster_name, service_name, task_definition, desired_count) + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + if attribute_name == 'Name': + return self.name + raise UnformattedGetAttTemplateException() + class ContainerInstance(BaseObject): - def __init__(self, ec2_instance_id): + def __init__(self, ec2_instance_id, region_name): self.ec2_instance_id = ec2_instance_id self.agent_connected = True self.status = 'ACTIVE' @@ -321,12 +346,41 @@ class ContainerInstance(BaseObject): 'agentHash': '4023248', 'dockerVersion': 'DockerVersion: 1.5.0' } - - self.attributes = {} + ec2_backend = ec2_backends[region_name] + ec2_instance = ec2_backend.get_instance(ec2_instance_id) + self.attributes = { + 'ecs.ami-id': ec2_instance.image_id, + 'ecs.availability-zone': ec2_instance.placement, + 'ecs.instance-type': ec2_instance.instance_type, + 'ecs.os-type': ec2_instance.platform if ec2_instance.platform == 'windows' else 'linux' # options are windows and linux, linux is default + } @property def 
response_object(self): response_object = self.gen_response_object() + response_object['attributes'] = [self._format_attribute(name, value) for name, value in response_object['attributes'].items()] + return response_object + + def _format_attribute(self, name, value): + formatted_attr = { + 'name': name, + } + if value is not None: + formatted_attr['value'] = value + return formatted_attr + + +class ClusterFailure(BaseObject): + def __init__(self, reason, cluster_name): + self.reason = reason + self.arn = "arn:aws:ecs:us-east-1:012345678910:cluster/{0}".format( + cluster_name) + + @property + def response_object(self): + response_object = self.gen_response_object() + response_object['reason'] = self.reason + response_object['arn'] = self.arn return response_object @@ -347,12 +401,19 @@ class ContainerInstanceFailure(BaseObject): class EC2ContainerServiceBackend(BaseBackend): - def __init__(self): + def __init__(self, region_name): + super(EC2ContainerServiceBackend, self).__init__() self.clusters = {} self.task_definitions = {} self.tasks = {} self.services = {} self.container_instances = {} + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) def describe_task_definition(self, task_definition_str): task_definition_name = task_definition_str.split('/')[-1] @@ -384,6 +445,7 @@ class EC2ContainerServiceBackend(BaseBackend): def describe_clusters(self, list_clusters_name=None): list_clusters = [] + failures = [] if list_clusters_name is None: if 'default' in self.clusters: list_clusters.append(self.clusters['default'].response_object) @@ -394,9 +456,8 @@ class EC2ContainerServiceBackend(BaseBackend): list_clusters.append( self.clusters[cluster_name].response_object) else: - raise Exception( - "{0} is not a cluster".format(cluster_name)) - return list_clusters + failures.append(ClusterFailure('MISSING', cluster_name)) + return list_clusters, failures def delete_cluster(self, cluster_str): cluster_name = cluster_str.split('/')[-1] @@ -479,10 +540,27 @@ class EC2ContainerServiceBackend(BaseBackend): def _calculate_task_resource_requirements(task_definition): resource_requirements = {"CPU": 0, "MEMORY": 0, "PORTS": [], "PORTS_UDP": []} for container_definition in task_definition.container_definitions: - resource_requirements["CPU"] += container_definition.get('cpu') - resource_requirements["MEMORY"] += container_definition.get("memory") - for port_mapping in container_definition.get("portMappings", []): - resource_requirements["PORTS"].append(port_mapping.get('hostPort')) + # cloudformation uses capitalized properties, while boto uses all lower case + + # CPU is optional + resource_requirements["CPU"] += container_definition.get('cpu', + container_definition.get('Cpu', 0)) + + # either memory or memory reservation must be provided + if 'Memory' in container_definition or 'MemoryReservation' in container_definition: + resource_requirements["MEMORY"] += container_definition.get( + "Memory", container_definition.get('MemoryReservation')) + else: + resource_requirements["MEMORY"] += container_definition.get( + "memory", container_definition.get('memoryReservation')) + + port_mapping_key = 'PortMappings' if 'PortMappings' in container_definition else 'portMappings' + for port_mapping in container_definition.get(port_mapping_key, []): + if 'hostPort' in port_mapping: + resource_requirements["PORTS"].append(port_mapping.get('hostPort')) + elif 'HostPort' in port_mapping: + 
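
The casing dance in `_calculate_task_resource_requirements` exists because CloudFormation task definitions arrive with PascalCase keys while boto3 sends camelCase. A hypothetical helper makes the lookup order explicit:

    def first_present(container, keys, default=None):
        # Return the value of the first key present, in caller-specified
        # priority order (CloudFormation casing vs. boto3 casing).
        for key in keys:
            if key in container:
                return container[key]
        return default

    container = {'Cpu': 256, 'MemoryReservation': 512}
    cpu = first_present(container, ['cpu', 'Cpu'], default=0)
    memory = first_present(container, ['Memory', 'MemoryReservation',
                                       'memory', 'memoryReservation'])
    assert (cpu, memory) == (256, 512)
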
resource_requirements["PORTS"].append(port_mapping.get('HostPort')) + return resource_requirements @staticmethod @@ -553,8 +631,9 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("tasks cannot be empty") response = [] for cluster, cluster_tasks in self.tasks.items(): - for task_id, task in cluster_tasks.items(): - if task_id in tasks or task.task_arn in tasks: + for task_arn, task in cluster_tasks.items(): + task_id = task_arn.split("/")[-1] + if task_arn in tasks or task.task_arn in tasks or any(task_id in task for task in tasks): response.append(task) return response @@ -604,7 +683,7 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("Could not find task {} on cluster {}".format( task_str, cluster_name)) - def create_service(self, cluster_str, service_name, task_definition_str, desired_count): + def create_service(self, cluster_str, service_name, task_definition_str, desired_count, load_balancers=None, scheduling_strategy=None): cluster_name = cluster_str.split('/')[-1] if cluster_name in self.clusters: cluster = self.clusters[cluster_name] @@ -612,18 +691,23 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("{0} is not a cluster".format(cluster_name)) task_definition = self.describe_task_definition(task_definition_str) desired_count = desired_count if desired_count is not None else 0 + service = Service(cluster, service_name, - task_definition, desired_count) + task_definition, desired_count, load_balancers, scheduling_strategy) cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) self.services[cluster_service_pair] = service + return service - def list_services(self, cluster_str): + def list_services(self, cluster_str, scheduling_strategy=None): cluster_name = cluster_str.split('/')[-1] service_arns = [] for key, value in self.services.items(): if cluster_name + ':' in key: - service_arns.append(self.services[key].arn) + service = self.services[key] + if scheduling_strategy is None or service.scheduling_strategy == scheduling_strategy: + service_arns.append(service.arn) + return sorted(service_arns) def describe_services(self, cluster_str, service_names_or_arns): @@ -650,8 +734,7 @@ class EC2ContainerServiceBackend(BaseBackend): cluster_service_pair].desired_count = desired_count return self.services[cluster_service_pair] else: - raise Exception("cluster {0} or service {1} does not exist".format( - cluster_name, service_name)) + raise ServiceNotFoundException(service_name) def delete_service(self, cluster_name, service_name): cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) @@ -669,7 +752,7 @@ class EC2ContainerServiceBackend(BaseBackend): cluster_name = cluster_str.split('/')[-1] if cluster_name not in self.clusters: raise Exception("{0} is not a cluster".format(cluster_name)) - container_instance = ContainerInstance(ec2_instance_id) + container_instance = ContainerInstance(ec2_instance_id, self.region_name) if not self.container_instances.get(cluster_name): self.container_instances[cluster_name] = {} container_instance_id = container_instance.container_instance_arn.split( @@ -715,6 +798,8 @@ class EC2ContainerServiceBackend(BaseBackend): Container instances status should be one of [ACTIVE,DRAINING]") failures = [] container_instance_objects = [] + list_container_instance_ids = [x.split('/')[-1] + for x in list_container_instance_ids] for container_instance_id in list_container_instance_ids: container_instance = self.container_instances[cluster_name].get(container_instance_id, None) if container_instance 
is not None: @@ -866,6 +951,5 @@ class EC2ContainerServiceBackend(BaseBackend): yield task_fam -ecs_backends = {} -for region, ec2_backend in ec2_backends.items(): - ecs_backends[region] = EC2ContainerServiceBackend() +available_regions = boto3.session.Session().get_available_regions("ecs") +ecs_backends = {region: EC2ContainerServiceBackend(region) for region in available_regions} diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index e81e04145..92b769fad 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -45,10 +45,10 @@ class EC2ContainerServiceResponse(BaseResponse): def describe_clusters(self): list_clusters_name = self._get_param('clusters') - clusters = self.ecs_backend.describe_clusters(list_clusters_name) + clusters, failures = self.ecs_backend.describe_clusters(list_clusters_name) return json.dumps({ 'clusters': clusters, - 'failures': [] + 'failures': [cluster.response_object for cluster in failures] }) def delete_cluster(self): @@ -153,15 +153,18 @@ class EC2ContainerServiceResponse(BaseResponse): service_name = self._get_param('serviceName') task_definition_str = self._get_param('taskDefinition') desired_count = self._get_int_param('desiredCount') + load_balancers = self._get_param('loadBalancers') + scheduling_strategy = self._get_param('schedulingStrategy') service = self.ecs_backend.create_service( - cluster_str, service_name, task_definition_str, desired_count) + cluster_str, service_name, task_definition_str, desired_count, load_balancers, scheduling_strategy) return json.dumps({ 'service': service.response_object }) def list_services(self): cluster_str = self._get_param('cluster') - service_arns = self.ecs_backend.list_services(cluster_str) + scheduling_strategy = self._get_param('schedulingStrategy') + service_arns = self.ecs_backend.list_services(cluster_str, scheduling_strategy) return json.dumps({ 'serviceArns': service_arns # , diff --git a/moto/elb/models.py b/moto/elb/models.py index 504c68908..8781620f1 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -268,7 +268,7 @@ class ELBBackend(BaseBackend): protocol = port['protocol'] instance_port = port['instance_port'] lb_port = port['load_balancer_port'] - ssl_certificate_id = port.get('sslcertificate_id') + ssl_certificate_id = port.get('ssl_certificate_id') for listener in balancer.listeners: if lb_port == listener.load_balancer_port: if protocol != listener.protocol: diff --git a/moto/elb/responses.py b/moto/elb/responses.py index b1980c9b2..b512f56e9 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -61,7 +61,7 @@ class ELBResponse(BaseResponse): start = all_names.index(marker) + 1 else: start = 0 - page_size = self._get_param('PageSize', 50) # the default is 400, but using 50 to make testing easier + page_size = self._get_int_param('PageSize', 50) # the default is 400, but using 50 to make testing easier load_balancers_resp = all_load_balancers[start:start + page_size] next_marker = None if len(all_load_balancers) > start + page_size: @@ -259,12 +259,22 @@ class ELBResponse(BaseResponse): def describe_instance_health(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [list(param.values())[0] for param in self._get_list_prefix('Instances.member')] - if len(instance_ids) == 0: - instance_ids = self.elb_backend.get_load_balancer( - load_balancer_name).instance_ids + provided_instance_ids = [ + list(param.values())[0] + for param in self._get_list_prefix('Instances.member') + ] + registered_instances_id = 
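
With the rewrite here, `DescribeInstanceHealth` reports `InService` only for instances actually registered with the balancer and `Unknown` for anything else the caller names. A sketch against the mock; the instance ids are placeholders:

    import boto3
    from moto import mock_elb

    @mock_elb
    def demo_instance_health():
        client = boto3.client('elb', region_name='us-east-1')
        client.create_load_balancer(
            LoadBalancerName='my-lb',
            Listeners=[{'Protocol': 'HTTP',
                        'LoadBalancerPort': 80, 'InstancePort': 80}],
            AvailabilityZones=['us-east-1a'])
        client.register_instances_with_load_balancer(
            LoadBalancerName='my-lb', Instances=[{'InstanceId': 'i-aaaa'}])
        health = client.describe_instance_health(
            LoadBalancerName='my-lb',
            Instances=[{'InstanceId': 'i-aaaa'}, {'InstanceId': 'i-bbbb'}])
        # registered -> 'InService'; unknown to the LB -> 'Unknown'
        return {i['InstanceId']: i['State'] for i in health['InstanceStates']}
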
self.elb_backend.get_load_balancer( + load_balancer_name).instance_ids + if len(provided_instance_ids) == 0: + provided_instance_ids = registered_instances_id template = self.response_template(DESCRIBE_INSTANCE_HEALTH_TEMPLATE) - return template.render(instance_ids=instance_ids) + instances = [] + for instance_id in provided_instance_ids: + state = "InService" \ + if instance_id in registered_instances_id\ + else "Unknown" + instances.append({"InstanceId": instance_id, "State": state}) + return template.render(instances=instances) def add_tags(self): @@ -689,11 +699,11 @@ SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE = """ - {% for instance_id in instance_ids %} + {% for instance in instances %} N/A - {{ instance_id }} - InService + {{ instance['InstanceId'] }} + {{ instance['State'] }} N/A {% endfor %} diff --git a/moto/elbv2/exceptions.py b/moto/elbv2/exceptions.py index 0bf9649d7..11dcbcb21 100644 --- a/moto/elbv2/exceptions.py +++ b/moto/elbv2/exceptions.py @@ -131,7 +131,7 @@ class InvalidActionTypeError(ELBClientError): def __init__(self, invalid_name, index): super(InvalidActionTypeError, self).__init__( "ValidationError", - "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward]" % (invalid_name, index) + "1 validation error detected: Value '%s' at 'actions.%s.member.type' failed to satisfy constraint: Member must satisfy enum value set: [forward, redirect]" % (invalid_name, index) ) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index c3bf77534..508541f91 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -35,12 +35,13 @@ from .exceptions import ( class FakeHealthStatus(BaseModel): - def __init__(self, instance_id, port, health_port, status, reason=None): + def __init__(self, instance_id, port, health_port, status, reason=None, description=None): self.instance_id = instance_id self.port = port self.health_port = health_port self.status = status self.reason = reason + self.description = description class FakeTargetGroup(BaseModel): @@ -52,30 +53,35 @@ class FakeTargetGroup(BaseModel): vpc_id, protocol, port, - healthcheck_protocol, - healthcheck_port, - healthcheck_path, - healthcheck_interval_seconds, - healthcheck_timeout_seconds, - healthy_threshold_count, - unhealthy_threshold_count, + healthcheck_protocol=None, + healthcheck_port=None, + healthcheck_path=None, + healthcheck_interval_seconds=None, + healthcheck_timeout_seconds=None, + healthy_threshold_count=None, + unhealthy_threshold_count=None, matcher=None, target_type=None): + + # TODO: default values differs when you add Network Load balancer self.name = name self.arn = arn self.vpc_id = vpc_id self.protocol = protocol self.port = port - self.healthcheck_protocol = healthcheck_protocol - self.healthcheck_port = healthcheck_port - self.healthcheck_path = healthcheck_path - self.healthcheck_interval_seconds = healthcheck_interval_seconds - self.healthcheck_timeout_seconds = healthcheck_timeout_seconds - self.healthy_threshold_count = healthy_threshold_count - self.unhealthy_threshold_count = unhealthy_threshold_count + self.healthcheck_protocol = healthcheck_protocol or 'HTTP' + self.healthcheck_port = healthcheck_port or str(self.port) + self.healthcheck_path = healthcheck_path or '/' + self.healthcheck_interval_seconds = healthcheck_interval_seconds or 30 + self.healthcheck_timeout_seconds = healthcheck_timeout_seconds or 5 + self.healthy_threshold_count = healthy_threshold_count or 5 + 
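
Making every health-check argument optional lets the mock fill gaps roughly the way the service does: a bare target group gets HTTP checks on its own port, path '/', a 30s interval, 5s timeout, 5/2 thresholds, and a 200 matcher. A sketch, assuming the mock accepts a placeholder VPC id and that the response template surfaces each default:

    import boto3
    from moto import mock_elbv2

    @mock_elbv2
    def demo_target_group_defaults():
        client = boto3.client('elbv2', region_name='us-east-1')
        tg = client.create_target_group(
            Name='tg', Protocol='HTTP', Port=8080,
            VpcId='vpc-12345678')['TargetGroups'][0]  # placeholder VPC id
        assert tg['HealthCheckProtocol'] == 'HTTP'
        assert tg['HealthCheckPort'] == '8080'  # falls back to the TG port
        assert tg['Matcher'] == {'HttpCode': '200'}
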
self.unhealthy_threshold_count = unhealthy_threshold_count or 2 self.load_balancer_arns = [] self.tags = {} - self.matcher = matcher + if matcher is None: + self.matcher = {'HttpCode': '200'} + else: + self.matcher = matcher self.target_type = target_type self.attributes = { @@ -107,10 +113,14 @@ class FakeTargetGroup(BaseModel): raise TooManyTagsError() self.tags[key] = value - def health_for(self, target): + def health_for(self, target, ec2_backend): t = self.targets.get(target['id']) if t is None: raise InvalidTargetError() + if t['id'].startswith("i-"): # EC2 instance ID + instance = ec2_backend.get_instance_by_id(t['id']) + if instance.state == "stopped": + return FakeHealthStatus(t['id'], t['port'], self.healthcheck_port, 'unused', 'Target.InvalidState', 'Target is in the stopped state') return FakeHealthStatus(t['id'], t['port'], self.healthcheck_port, 'healthy') @classmethod @@ -119,10 +129,7 @@ class FakeTargetGroup(BaseModel): elbv2_backend = elbv2_backends[region_name] - # per cloudformation docs: - # The target group name should be shorter than 22 characters because - # AWS CloudFormation uses the target group name to create the name of the load balancer. - name = properties.get('Name', resource_name[:22]) + name = properties.get('Name') vpc_id = properties.get("VpcId") protocol = properties.get('Protocol') port = properties.get("Port") @@ -202,8 +209,20 @@ class FakeListener(BaseModel): # transform default actions to confirm with the rest of the code and XML templates if "DefaultActions" in properties: default_actions = [] - for action in properties['DefaultActions']: - default_actions.append({'type': action['Type'], 'target_group_arn': action['TargetGroupArn']}) + for i, action in enumerate(properties['DefaultActions']): + action_type = action['Type'] + if action_type == 'forward': + default_actions.append({'type': action_type, 'target_group_arn': action['TargetGroupArn']}) + elif action_type == 'redirect': + redirect_action = {'type': action_type, } + for redirect_config_key, redirect_config_value in action['RedirectConfig'].items(): + # need to match the output of _get_list_prefix + if redirect_config_key == 'StatusCode': + redirect_config_key = 'status_code' + redirect_action['redirect_config._' + redirect_config_key.lower()] = redirect_config_value + default_actions.append(redirect_action) + else: + raise InvalidActionTypeError(action_type, i + 1) else: default_actions = None @@ -293,11 +312,32 @@ class FakeLoadBalancer(BaseModel): return load_balancer def get_cfn_attribute(self, attribute_name): - attributes = { - 'DNSName': self.dns_name, - 'LoadBalancerName': self.name, - } - return attributes[attribute_name] + ''' + Implemented attributes: + * DNSName + * LoadBalancerName + + Not implemented: + * CanonicalHostedZoneID + * LoadBalancerFullName + * SecurityGroups + + This method is similar to models.py:FakeLoadBalancer.get_cfn_attribute() + ''' + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + not_implemented_yet = [ + 'CanonicalHostedZoneID', + 'LoadBalancerFullName', + 'SecurityGroups', + ] + if attribute_name == 'DNSName': + return self.dns_name + elif attribute_name == 'LoadBalancerName': + return self.name + elif attribute_name in not_implemented_yet: + raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "%s" ]"' % attribute_name) + else: + raise UnformattedGetAttTemplateException() class ELBv2Backend(BaseBackend): @@ -394,11 +434,15 @@ class ELBv2Backend(BaseBackend): for i, action in enumerate(actions): index = i + 1 action_type = 
action['type'] - if action_type not in ['forward']: + if action_type == 'forward': + action_target_group_arn = action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: raise InvalidActionTypeError(action_type, index) - action_target_group_arn = action['target_group_arn'] - if action_target_group_arn not in target_group_arns: - raise ActionTargetGroupNotFoundError(action_target_group_arn) # TODO: check for error 'TooManyRegistrationsForTargetId' # TODO: check for error 'TooManyRules' @@ -411,7 +455,7 @@ class ELBv2Backend(BaseBackend): def create_target_group(self, name, **kwargs): if len(name) > 32: raise InvalidTargetGroupNameError( - "Target group name '%s' cannot be longer than '22' characters" % name + "Target group name '%s' cannot be longer than '32' characters" % name ) if not re.match('^[a-zA-Z0-9\-]+$', name): raise InvalidTargetGroupNameError( @@ -433,28 +477,18 @@ class ELBv2Backend(BaseBackend): raise DuplicateTargetGroupName() valid_protocols = ['HTTPS', 'HTTP', 'TCP'] - if kwargs['healthcheck_protocol'] not in valid_protocols: + if kwargs.get('healthcheck_protocol') and kwargs['healthcheck_protocol'] not in valid_protocols: raise InvalidConditionValueError( "Value {} at 'healthCheckProtocol' failed to satisfy constraint: " "Member must satisfy enum value set: {}".format(kwargs['healthcheck_protocol'], valid_protocols)) - if kwargs['protocol'] not in valid_protocols: + if kwargs.get('protocol') and kwargs['protocol'] not in valid_protocols: raise InvalidConditionValueError( "Value {} at 'protocol' failed to satisfy constraint: " "Member must satisfy enum value set: {}".format(kwargs['protocol'], valid_protocols)) - if FakeTargetGroup.HTTP_CODE_REGEX.match(kwargs['matcher']['HttpCode']) is None: + if kwargs.get('matcher') and FakeTargetGroup.HTTP_CODE_REGEX.match(kwargs['matcher']['HttpCode']) is None: raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...') - valid_protocols = ['HTTPS', 'HTTP', 'TCP'] - if kwargs['healthcheck_protocol'] not in valid_protocols: - raise InvalidConditionValueError( - "Value {} at 'healthCheckProtocol' failed to satisfy constraint: " - "Member must satisfy enum value set: {}".format(kwargs['healthcheck_protocol'], valid_protocols)) - if kwargs['protocol'] not in valid_protocols: - raise InvalidConditionValueError( - "Value {} at 'protocol' failed to satisfy constraint: " - "Member must satisfy enum value set: {}".format(kwargs['protocol'], valid_protocols)) - arn = make_arn_for_target_group(account_id=1, name=name, region_name=self.region_name) target_group = FakeTargetGroup(name, arn, **kwargs) self.target_groups[target_group.arn] = target_group @@ -470,6 +504,18 @@ class ELBv2Backend(BaseBackend): arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self)) listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions) balancer.listeners[listener.arn] = listener + for i, action in enumerate(default_actions): + action_type = action['type'] + if action_type == 'forward': + if action['target_group_arn'] in self.target_groups.keys(): + target_group = self.target_groups[action['target_group_arn']] + target_group.load_balancer_arns.append(load_balancer_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: + raise InvalidActionTypeError(action_type, i + 1) + return 
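
Redirect actions are now accepted end to end. A sketch of the API shape; the client and `lb_arn` are assumed to refer to an existing mocked ELBv2 setup:

    import boto3

    client = boto3.client('elbv2', region_name='us-east-1')
    lb_arn = '...'  # placeholder: ARN of a previously created mock balancer

    listener = client.create_listener(
        LoadBalancerArn=lb_arn,
        Protocol='HTTP',
        Port=80,
        DefaultActions=[{
            'Type': 'redirect',
            'RedirectConfig': {'Protocol': 'HTTPS',
                               'Port': '443',
                               'StatusCode': 'HTTP_301'},
        }])
    # A 'forward' action would instead carry TargetGroupArn; any other type
    # now fails validation against the enum set [forward, redirect].
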
listener def describe_load_balancers(self, arns, names): @@ -632,11 +678,15 @@ class ELBv2Backend(BaseBackend): for i, action in enumerate(actions): index = i + 1 action_type = action['type'] - if action_type not in ['forward']: + if action_type == 'forward': + action_target_group_arn = action['target_group_arn'] + if action_target_group_arn not in target_group_arns: + raise ActionTargetGroupNotFoundError(action_target_group_arn) + elif action_type == 'redirect': + # nothing to do + pass + else: raise InvalidActionTypeError(action_type, index) - action_target_group_arn = action['target_group_arn'] - if action_target_group_arn not in target_group_arns: - raise ActionTargetGroupNotFoundError(action_target_group_arn) # TODO: check for error 'TooManyRegistrationsForTargetId' # TODO: check for error 'TooManyRules' @@ -667,7 +717,7 @@ class ELBv2Backend(BaseBackend): if not targets: targets = target_group.targets.values() - return [target_group.health_for(target) for target in targets] + return [target_group.health_for(target, self.ec2_backend) for target in targets] def set_rule_priorities(self, rule_priorities): # validate @@ -856,7 +906,7 @@ class ELBv2Backend(BaseBackend): # Its already validated in responses.py listener.ssl_policy = ssl_policy - if default_actions is not None: + if default_actions is not None and default_actions != []: # Is currently not validated listener.default_actions = default_actions diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index aa855b430..c98435440 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -180,14 +180,14 @@ class ELBV2Response(BaseResponse): vpc_id = self._get_param('VpcId') protocol = self._get_param('Protocol') port = self._get_param('Port') - healthcheck_protocol = self._get_param('HealthCheckProtocol', 'HTTP') - healthcheck_port = self._get_param('HealthCheckPort', 'traffic-port') - healthcheck_path = self._get_param('HealthCheckPath', '/') - healthcheck_interval_seconds = self._get_param('HealthCheckIntervalSeconds', '30') - healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds', '5') - healthy_threshold_count = self._get_param('HealthyThresholdCount', '5') - unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount', '2') - http_codes = self._get_param('Matcher.HttpCode', '200') + healthcheck_protocol = self._get_param('HealthCheckProtocol') + healthcheck_port = self._get_param('HealthCheckPort') + healthcheck_path = self._get_param('HealthCheckPath') + healthcheck_interval_seconds = self._get_param('HealthCheckIntervalSeconds') + healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds') + healthy_threshold_count = self._get_param('HealthyThresholdCount') + unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount') + matcher = self._get_param('Matcher') target_group = self.elbv2_backend.create_target_group( name, @@ -201,7 +201,7 @@ class ELBV2Response(BaseResponse): healthcheck_timeout_seconds=healthcheck_timeout_seconds, healthy_threshold_count=healthy_threshold_count, unhealthy_threshold_count=unhealthy_threshold_count, - matcher={'HttpCode': http_codes} + matcher=matcher, ) template = self.response_template(CREATE_TARGET_GROUP_TEMPLATE) @@ -242,7 +242,7 @@ class ELBV2Response(BaseResponse): start = all_names.index(marker) + 1 else: start = 0 - page_size = self._get_param('PageSize', 50) # the default is 400, but using 50 to make testing easier + page_size = self._get_int_param('PageSize', 50) # the default is 400, but using 50 to make testing 
easier load_balancers_resp = all_load_balancers[start:start + page_size] next_marker = None if len(all_load_balancers) > start + page_size: @@ -468,7 +468,7 @@ class ELBV2Response(BaseResponse): def describe_account_limits(self): # Supports paging but not worth implementing yet # marker = self._get_param('Marker') - # page_size = self._get_param('PageSize') + # page_size = self._get_int_param('PageSize') limits = { 'application-load-balancers': 20, @@ -489,7 +489,7 @@ class ELBV2Response(BaseResponse): names = self._get_multi_param('Names.member.') # Supports paging but not worth implementing yet # marker = self._get_param('Marker') - # page_size = self._get_param('PageSize') + # page_size = self._get_int_param('PageSize') policies = SSL_POLICIES if names: @@ -704,7 +704,11 @@ CREATE_RULE_TEMPLATE = """ + {% if action["type"] == "forward" %} {{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + {{ action["redirect_config"] }} + {% endif %} {% endfor %} @@ -772,7 +776,15 @@ CREATE_LISTENER_TEMPLATE = """{{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -877,7 +889,15 @@ DESCRIBE_RULES_TEMPLATE = """ + {% if action["type"] == "forward" %} {{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -970,7 +990,15 @@ DESCRIBE_LISTENERS_TEMPLATE = """{{ action["target_group_arn"] }}m + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} @@ -1180,6 +1208,12 @@ DESCRIBE_TARGET_HEALTH_TEMPLATE = """{{ action["target_group_arn"] }} + {% elif action["type"] == "redirect" %} + + {{ action["redirect_config._protocol"] }} + {{ action["redirect_config._port"] }} + {{ action["redirect_config._status_code"] }} + + {% endif %} {% endfor %} diff --git a/moto/emr/models.py b/moto/emr/models.py index 6b7147e3f..4b591acb1 100644 --- a/moto/emr/models.py +++ b/moto/emr/models.py @@ -97,7 +97,8 @@ class FakeCluster(BaseModel): visible_to_all_users='false', release_label=None, requested_ami_version=None, - running_ami_version=None): + running_ami_version=None, + custom_ami_id=None): self.id = cluster_id or random_cluster_id() emr_backend.clusters[self.id] = self self.emr_backend = emr_backend @@ -162,6 +163,7 @@ class FakeCluster(BaseModel): self.release_label = release_label self.requested_ami_version = requested_ami_version self.running_ami_version = running_ami_version + self.custom_ami_id = custom_ami_id self.role = job_flow_role or 'EMRJobflowDefault' self.service_role = service_role diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 8442e4010..c807b5f54 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -267,6 +267,18 @@ class ElasticMapReduceResponse(BaseResponse): else: kwargs['running_ami_version'] = '1.0.0' + custom_ami_id = self._get_param('CustomAmiId') + if custom_ami_id: + kwargs['custom_ami_id'] = custom_ami_id + if release_label and release_label < 'emr-5.7.0': + message = 'Custom AMI is not allowed' + raise EmrError(error_type='ValidationException', + message=message, template='error_json') + elif ami_version: + message = 'Custom AMI 
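
One caveat worth flagging on the version gate above: `release_label < 'emr-5.7.0'` is a lexicographic string comparison. That holds for the single-digit minor versions current at the time, but a two-digit minor would mis-compare, so a numeric parse would be more robust:

    assert 'emr-5.6.0' < 'emr-5.7.0'   # intended: 5.6 is rejected
    assert 'emr-5.10.0' < 'emr-5.7.0'  # pitfall: 5.10 is also "rejected"

    # A hypothetical numeric comparison avoiding the pitfall:
    def emr_version(release_label):
        return tuple(int(p) for p in release_label.replace('emr-', '').split('.'))

    assert emr_version('emr-5.10.0') > emr_version('emr-5.7.0')
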
is not supported in this version of EMR' + raise EmrError(error_type='ValidationException', + message=message, template='error_json') + cluster = self.backend.run_job_flow(**kwargs) applications = self._get_list_prefix('Applications.member') @@ -375,6 +387,9 @@ DESCRIBE_CLUSTER_TEMPLATE = """ self.et: + d["Completed"] = True + d["CompletionDate"] = self.et.strftime("%Y-%m-%dT%H:%M:%S.000Z") + d["InventorySizeInBytes"] = 10000 + d["StatusCode"] = "Succeeded" + return d + + +class InventoryJob(Job): + + def __init__(self, job_id, tier, arn): + self.job_id = job_id + self.tier = tier + self.arn = arn + Job.__init__(self, tier) + + def to_dict(self): + d = { + "Action": "InventoryRetrieval", + "ArchiveSHA256TreeHash": None, + "Completed": False, + "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + "InventorySizeInBytes": 0, + "JobDescription": None, + "JobId": self.job_id, + "RetrievalByteRange": None, + "SHA256TreeHash": None, + "SNSTopic": None, + "StatusCode": "InProgress", + "StatusMessage": None, + "VaultARN": self.arn, + "Tier": self.tier + } + if datetime.datetime.now() > self.et: + d["Completed"] = True + d["CompletionDate"] = self.et.strftime("%Y-%m-%dT%H:%M:%S.000Z") + d["InventorySizeInBytes"] = 10000 + d["StatusCode"] = "Succeeded" + return d class Vault(BaseModel): def __init__(self, vault_name, region): + self.st = datetime.datetime.now() self.vault_name = vault_name self.region = region self.archives = {} @@ -48,29 +107,57 @@ class Vault(BaseModel): return "arn:aws:glacier:{0}:012345678901:vaults/{1}".format(self.region, self.vault_name) def to_dict(self): - return { - "CreationDate": "2013-03-20T17:03:43.221Z", - "LastInventoryDate": "2013-03-20T17:03:43.221Z", - "NumberOfArchives": None, - "SizeInBytes": None, + archives_size = 0 + for k in self.archives: + archives_size += self.archives[k]["size"] + d = { + "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + "LastInventoryDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + "NumberOfArchives": len(self.archives), + "SizeInBytes": archives_size, "VaultARN": self.arn, "VaultName": self.vault_name, } + return d - def create_archive(self, body): - archive_id = hashlib.sha256(body).hexdigest() - self.archives[archive_id] = body + def create_archive(self, body, description): + archive_id = hashlib.md5(body).hexdigest() + self.archives[archive_id] = {} + self.archives[archive_id]["body"] = body + self.archives[archive_id]["size"] = len(body) + self.archives[archive_id]["sha256"] = hashlib.sha256(body).hexdigest() + self.archives[archive_id]["creation_date"] = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z") + self.archives[archive_id]["description"] = description return archive_id def get_archive_body(self, archive_id): - return self.archives[archive_id] + return self.archives[archive_id]["body"] + + def get_archive_list(self): + archive_list = [] + for a in self.archives: + archive = self.archives[a] + aobj = { + "ArchiveId": a, + "ArchiveDescription": archive["description"], + "CreationDate": archive["creation_date"], + "Size": archive["size"], + "SHA256TreeHash": archive["sha256"] + } + archive_list.append(aobj) + return archive_list def delete_archive(self, archive_id): return self.archives.pop(archive_id) - def initiate_job(self, archive_id): + def initiate_job(self, job_type, tier, archive_id): job_id = get_job_id() - job = ArchiveJob(job_id, archive_id) + + if job_type == "inventory-retrieval": + job = InventoryJob(job_id, tier, self.arn) + elif job_type == "archive-retrieval": + job = 
ArchiveJob(job_id, tier, self.arn, archive_id) + self.jobs[job_id] = job return job_id @@ -80,10 +167,24 @@ class Vault(BaseModel): def describe_job(self, job_id): return self.jobs.get(job_id) + def job_ready(self, job_id): + job = self.describe_job(job_id) + jobj = job.to_dict() + return jobj["Completed"] + def get_job_output(self, job_id): job = self.describe_job(job_id) - archive_body = self.get_archive_body(job.archive_id) - return archive_body + jobj = job.to_dict() + if jobj["Action"] == "InventoryRetrieval": + archives = self.get_archive_list() + return { + "VaultARN": self.arn, + "InventoryDate": jobj["CompletionDate"], + "ArchiveList": archives + } + else: + archive_body = self.get_archive_body(job.archive_id) + return archive_body class GlacierBackend(BaseBackend): @@ -109,9 +210,9 @@ class GlacierBackend(BaseBackend): def delete_vault(self, vault_name): self.vaults.pop(vault_name) - def initiate_job(self, vault_name, archive_id): + def initiate_job(self, vault_name, job_type, tier, archive_id): vault = self.get_vault(vault_name) - job_id = vault.initiate_job(archive_id) + job_id = vault.initiate_job(job_type, tier, archive_id) return job_id def list_jobs(self, vault_name): diff --git a/moto/glacier/responses.py b/moto/glacier/responses.py index cda859b29..abdf83e4f 100644 --- a/moto/glacier/responses.py +++ b/moto/glacier/responses.py @@ -72,17 +72,25 @@ class GlacierResponse(_TemplateEnvironmentMixin): def _vault_archive_response(self, request, full_url, headers): method = request.method - body = request.body + if hasattr(request, 'body'): + body = request.body + else: + body = request.data + description = "" + if 'x-amz-archive-description' in request.headers: + description = request.headers['x-amz-archive-description'] parsed_url = urlparse(full_url) querystring = parse_qs(parsed_url.query, keep_blank_values=True) vault_name = full_url.split("/")[-2] if method == 'POST': - return self._vault_archive_response_post(vault_name, body, querystring, headers) + return self._vault_archive_response_post(vault_name, body, description, querystring, headers) + else: + return 400, headers, "400 Bad Request" - def _vault_archive_response_post(self, vault_name, body, querystring, headers): + def _vault_archive_response_post(self, vault_name, body, description, querystring, headers): vault = self.backend.get_vault(vault_name) - vault_id = vault.create_archive(body) + vault_id = vault.create_archive(body, description) headers['x-amz-archive-id'] = vault_id return 201, headers, "" @@ -110,7 +118,10 @@ class GlacierResponse(_TemplateEnvironmentMixin): def _vault_jobs_response(self, request, full_url, headers): method = request.method - body = request.body + if hasattr(request, 'body'): + body = request.body + else: + body = request.data account_id = full_url.split("/")[1] vault_name = full_url.split("/")[-2] @@ -125,11 +136,17 @@ class GlacierResponse(_TemplateEnvironmentMixin): }) elif method == 'POST': json_body = json.loads(body.decode("utf-8")) - archive_id = json_body['ArchiveId'] - job_id = self.backend.initiate_job(vault_name, archive_id) + job_type = json_body['Type'] + archive_id = None + if 'ArchiveId' in json_body: + archive_id = json_body['ArchiveId'] + if 'Tier' in json_body: + tier = json_body["Tier"] + else: + tier = "Standard" + job_id = self.backend.initiate_job(vault_name, job_type, tier, archive_id) headers['x-amz-job-id'] = job_id - headers[ - 'Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id) + headers['Location'] = 
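
With job types and tiers wired through, the mock now mimics Glacier's asynchronous job model: `get_job_output` succeeds only once the job's simulated completion time has passed, returning JSON for inventory jobs and raw bytes for archive retrievals. A usage sketch; polling is how a caller would treat real Glacier as well:

    import time

    import boto3
    from moto import mock_glacier

    @mock_glacier
    def demo_inventory_job():
        client = boto3.client('glacier', region_name='us-east-1')
        client.create_vault(vaultName='my-vault')
        client.upload_archive(vaultName='my-vault', body=b'payload',
                              archiveDescription='demo archive')
        job = client.initiate_job(
            vaultName='my-vault',
            jobParameters={'Type': 'inventory-retrieval', 'Tier': 'Standard'})
        while not client.describe_job(vaultName='my-vault',
                                      jobId=job['jobId'])['Completed']:
            time.sleep(1)  # job flips to Succeeded after its simulated delay
        output = client.get_job_output(vaultName='my-vault', jobId=job['jobId'])
        # output['body'] holds the JSON inventory:
        # {'VaultARN': ..., 'InventoryDate': ..., 'ArchiveList': [...]}
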
"/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id) return 202, headers, "" @classmethod @@ -155,8 +172,14 @@ class GlacierResponse(_TemplateEnvironmentMixin): def _vault_jobs_output_response(self, request, full_url, headers): vault_name = full_url.split("/")[-4] job_id = full_url.split("/")[-2] - vault = self.backend.get_vault(vault_name) - output = vault.get_job_output(job_id) - headers['content-type'] = 'application/octet-stream' - return 200, headers, output + if vault.job_ready(job_id): + output = vault.get_job_output(job_id) + if isinstance(output, dict): + headers['content-type'] = 'application/json' + return 200, headers, json.dumps(output) + else: + headers['content-type'] = 'application/octet-stream' + return 200, headers, output + else: + return 404, headers, "404 Not Found" diff --git a/moto/glue/__init__.py b/moto/glue/__init__.py new file mode 100644 index 000000000..6b1f13326 --- /dev/null +++ b/moto/glue/__init__.py @@ -0,0 +1,5 @@ +from __future__ import unicode_literals +from .models import glue_backend + +glue_backends = {"global": glue_backend} +mock_glue = glue_backend.decorator diff --git a/moto/glue/exceptions.py b/moto/glue/exceptions.py new file mode 100644 index 000000000..8972adb35 --- /dev/null +++ b/moto/glue/exceptions.py @@ -0,0 +1,61 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class GlueClientError(JsonRESTError): + code = 400 + + +class AlreadyExistsException(GlueClientError): + def __init__(self, typ): + super(GlueClientError, self).__init__( + 'AlreadyExistsException', + '%s already exists.' % (typ), + ) + + +class DatabaseAlreadyExistsException(AlreadyExistsException): + def __init__(self): + super(DatabaseAlreadyExistsException, self).__init__('Database') + + +class TableAlreadyExistsException(AlreadyExistsException): + def __init__(self): + super(TableAlreadyExistsException, self).__init__('Table') + + +class PartitionAlreadyExistsException(AlreadyExistsException): + def __init__(self): + super(PartitionAlreadyExistsException, self).__init__('Partition') + + +class EntityNotFoundException(GlueClientError): + def __init__(self, msg): + super(GlueClientError, self).__init__( + 'EntityNotFoundException', + msg, + ) + + +class DatabaseNotFoundException(EntityNotFoundException): + def __init__(self, db): + super(DatabaseNotFoundException, self).__init__( + 'Database %s not found.' % db, + ) + + +class TableNotFoundException(EntityNotFoundException): + def __init__(self, tbl): + super(TableNotFoundException, self).__init__( + 'Table %s not found.' 
% tbl, + ) + + +class PartitionNotFoundException(EntityNotFoundException): + def __init__(self): + super(PartitionNotFoundException, self).__init__("Cannot find partition.") + + +class VersionNotFoundException(EntityNotFoundException): + def __init__(self): + super(VersionNotFoundException, self).__init__("Version not found.") diff --git a/moto/glue/models.py b/moto/glue/models.py new file mode 100644 index 000000000..0989e0e9b --- /dev/null +++ b/moto/glue/models.py @@ -0,0 +1,166 @@ +from __future__ import unicode_literals + +import time + +from moto.core import BaseBackend, BaseModel +from moto.compat import OrderedDict +from.exceptions import ( + JsonRESTError, + DatabaseAlreadyExistsException, + DatabaseNotFoundException, + TableAlreadyExistsException, + TableNotFoundException, + PartitionAlreadyExistsException, + PartitionNotFoundException, + VersionNotFoundException, +) + + +class GlueBackend(BaseBackend): + + def __init__(self): + self.databases = OrderedDict() + + def create_database(self, database_name): + if database_name in self.databases: + raise DatabaseAlreadyExistsException() + + database = FakeDatabase(database_name) + self.databases[database_name] = database + return database + + def get_database(self, database_name): + try: + return self.databases[database_name] + except KeyError: + raise DatabaseNotFoundException(database_name) + + def create_table(self, database_name, table_name, table_input): + database = self.get_database(database_name) + + if table_name in database.tables: + raise TableAlreadyExistsException() + + table = FakeTable(database_name, table_name, table_input) + database.tables[table_name] = table + return table + + def get_table(self, database_name, table_name): + database = self.get_database(database_name) + try: + return database.tables[table_name] + except KeyError: + raise TableNotFoundException(table_name) + + def get_tables(self, database_name): + database = self.get_database(database_name) + return [table for table_name, table in database.tables.items()] + + def delete_table(self, database_name, table_name): + database = self.get_database(database_name) + try: + del database.tables[table_name] + except KeyError: + raise TableNotFoundException(table_name) + return {} + + +class FakeDatabase(BaseModel): + + def __init__(self, database_name): + self.name = database_name + self.tables = OrderedDict() + + +class FakeTable(BaseModel): + + def __init__(self, database_name, table_name, table_input): + self.database_name = database_name + self.name = table_name + self.partitions = OrderedDict() + self.versions = [] + self.update(table_input) + + def update(self, table_input): + self.versions.append(table_input) + + def get_version(self, ver): + try: + if not isinstance(ver, int): + # "1" goes to [0] + ver = int(ver) - 1 + except ValueError as e: + raise JsonRESTError("InvalidInputException", str(e)) + + try: + return self.versions[ver] + except IndexError: + raise VersionNotFoundException() + + def as_dict(self, version=-1): + obj = { + 'DatabaseName': self.database_name, + 'Name': self.name, + } + obj.update(self.get_version(version)) + return obj + + def create_partition(self, partiton_input): + partition = FakePartition(self.database_name, self.name, partiton_input) + key = str(partition.values) + if key in self.partitions: + raise PartitionAlreadyExistsException() + self.partitions[str(partition.values)] = partition + + def get_partitions(self): + return [p for str_part_values, p in self.partitions.items()] + + def get_partition(self, values): + try: + 
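
Table versions accumulate in `self.versions`, and `get_version` maps the API's 1-based string `VersionId` onto that 0-based list. A sketch using the `mock_glue` decorator this changeset introduces:

    import boto3
    from moto import mock_glue

    @mock_glue
    def demo_table_versions():
        client = boto3.client('glue', region_name='us-east-1')
        client.create_database(DatabaseInput={'Name': 'db'})
        client.create_table(DatabaseName='db', TableInput={'Name': 'events'})
        client.update_table(DatabaseName='db',
                            TableInput={'Name': 'events', 'Description': 'v2'})
        versions = client.get_table_versions(
            DatabaseName='db', TableName='events')['TableVersions']
        assert [v['VersionId'] for v in versions] == ['1', '2']
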
return self.partitions[str(values)] + except KeyError: + raise PartitionNotFoundException() + + def update_partition(self, old_values, partiton_input): + partition = FakePartition(self.database_name, self.name, partiton_input) + key = str(partition.values) + if old_values == partiton_input['Values']: + # Altering a partition in place. Don't remove it so the order of + # returned partitions doesn't change + if key not in self.partitions: + raise PartitionNotFoundException() + else: + removed = self.partitions.pop(str(old_values), None) + if removed is None: + raise PartitionNotFoundException() + if key in self.partitions: + # Trying to update to overwrite a partition that exists + raise PartitionAlreadyExistsException() + self.partitions[key] = partition + + def delete_partition(self, values): + try: + del self.partitions[str(values)] + except KeyError: + raise PartitionNotFoundException() + + +class FakePartition(BaseModel): + def __init__(self, database_name, table_name, partiton_input): + self.creation_time = time.time() + self.database_name = database_name + self.table_name = table_name + self.partition_input = partiton_input + self.values = self.partition_input.get('Values', []) + + def as_dict(self): + obj = { + 'DatabaseName': self.database_name, + 'TableName': self.table_name, + 'CreationTime': self.creation_time, + } + obj.update(self.partition_input) + return obj + + +glue_backend = GlueBackend() diff --git a/moto/glue/responses.py b/moto/glue/responses.py new file mode 100644 index 000000000..875513e7f --- /dev/null +++ b/moto/glue/responses.py @@ -0,0 +1,239 @@ +from __future__ import unicode_literals + +import json + +from moto.core.responses import BaseResponse +from .models import glue_backend +from .exceptions import ( + PartitionAlreadyExistsException, + PartitionNotFoundException, + TableNotFoundException +) + + +class GlueResponse(BaseResponse): + + @property + def glue_backend(self): + return glue_backend + + @property + def parameters(self): + return json.loads(self.body) + + def create_database(self): + database_name = self.parameters['DatabaseInput']['Name'] + self.glue_backend.create_database(database_name) + return "" + + def get_database(self): + database_name = self.parameters.get('Name') + database = self.glue_backend.get_database(database_name) + return json.dumps({'Database': {'Name': database.name}}) + + def create_table(self): + database_name = self.parameters.get('DatabaseName') + table_input = self.parameters.get('TableInput') + table_name = table_input.get('Name') + self.glue_backend.create_table(database_name, table_name, table_input) + return "" + + def get_table(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('Name') + table = self.glue_backend.get_table(database_name, table_name) + + return json.dumps({'Table': table.as_dict()}) + + def update_table(self): + database_name = self.parameters.get('DatabaseName') + table_input = self.parameters.get('TableInput') + table_name = table_input.get('Name') + table = self.glue_backend.get_table(database_name, table_name) + table.update(table_input) + return "" + + def get_table_versions(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + table = self.glue_backend.get_table(database_name, table_name) + + return json.dumps({ + "TableVersions": [ + { + "Table": table.as_dict(version=n), + "VersionId": str(n + 1), + } for n in range(len(table.versions)) + ], + }) + + def get_table_version(self): + database_name 
= self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + table = self.glue_backend.get_table(database_name, table_name) + ver_id = self.parameters.get('VersionId') + + return json.dumps({ + "TableVersion": { + "Table": table.as_dict(version=ver_id), + "VersionId": ver_id, + }, + }) + + def get_tables(self): + database_name = self.parameters.get('DatabaseName') + tables = self.glue_backend.get_tables(database_name) + return json.dumps({ + 'TableList': [ + table.as_dict() for table in tables + ] + }) + + def delete_table(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('Name') + resp = self.glue_backend.delete_table(database_name, table_name) + return json.dumps(resp) + + def batch_delete_table(self): + database_name = self.parameters.get('DatabaseName') + + errors = [] + for table_name in self.parameters.get('TablesToDelete'): + try: + self.glue_backend.delete_table(database_name, table_name) + except TableNotFoundException: + errors.append({ + "TableName": table_name, + "ErrorDetail": { + "ErrorCode": "EntityNotFoundException", + "ErrorMessage": "Table not found" + } + }) + + out = {} + if errors: + out["Errors"] = errors + + return json.dumps(out) + + def get_partitions(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + if 'Expression' in self.parameters: + raise NotImplementedError("Expression filtering in get_partitions is not implemented in moto") + table = self.glue_backend.get_table(database_name, table_name) + + return json.dumps({ + 'Partitions': [ + p.as_dict() for p in table.get_partitions() + ] + }) + + def get_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + values = self.parameters.get('PartitionValues') + + table = self.glue_backend.get_table(database_name, table_name) + + p = table.get_partition(values) + + return json.dumps({'Partition': p.as_dict()}) + + def batch_get_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + partitions_to_get = self.parameters.get('PartitionsToGet') + + table = self.glue_backend.get_table(database_name, table_name) + + partitions = [] + for values in partitions_to_get: + try: + p = table.get_partition(values=values["Values"]) + partitions.append(p.as_dict()) + except PartitionNotFoundException: + continue + + return json.dumps({'Partitions': partitions}) + + def create_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + part_input = self.parameters.get('PartitionInput') + + table = self.glue_backend.get_table(database_name, table_name) + table.create_partition(part_input) + + return "" + + def batch_create_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + table = self.glue_backend.get_table(database_name, table_name) + + errors_output = [] + for part_input in self.parameters.get('PartitionInputList'): + try: + table.create_partition(part_input) + except PartitionAlreadyExistsException: + errors_output.append({ + 'PartitionValues': part_input['Values'], + 'ErrorDetail': { + 'ErrorCode': 'AlreadyExistsException', + 'ErrorMessage': 'Partition already exists.' 
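
Because partitions are keyed by `str(values)`, `update_partition` above distinguishes an in-place alteration (old and new `Values` match, ordering preserved) from a move (the old key is popped and the new key must not already exist). Both paths through boto3, assuming the mocked client, database, and table from the previous sketch:

    # In place: same Values, changed metadata -- ordering is preserved.
    client.update_partition(
        DatabaseName='db', TableName='events',
        PartitionValueList=['2019-01-01'],
        PartitionInput={'Values': ['2019-01-01'],
                        'Parameters': {'note': 'reclassified'}})

    # Move: new Values -- raises AlreadyExistsException if the target exists.
    client.update_partition(
        DatabaseName='db', TableName='events',
        PartitionValueList=['2019-01-01'],
        PartitionInput={'Values': ['2019-01-02']})
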
+ } + }) + + out = {} + if errors_output: + out["Errors"] = errors_output + + return json.dumps(out) + + def update_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + part_input = self.parameters.get('PartitionInput') + part_to_update = self.parameters.get('PartitionValueList') + + table = self.glue_backend.get_table(database_name, table_name) + table.update_partition(part_to_update, part_input) + + return "" + + def delete_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + part_to_delete = self.parameters.get('PartitionValues') + + table = self.glue_backend.get_table(database_name, table_name) + table.delete_partition(part_to_delete) + + return "" + + def batch_delete_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + table = self.glue_backend.get_table(database_name, table_name) + + errors_output = [] + for part_input in self.parameters.get('PartitionsToDelete'): + values = part_input.get('Values') + try: + table.delete_partition(values) + except PartitionNotFoundException: + errors_output.append({ + 'PartitionValues': values, + 'ErrorDetail': { + 'ErrorCode': 'EntityNotFoundException', + 'ErrorMessage': 'Partition not found', + } + }) + + out = {} + if errors_output: + out['Errors'] = errors_output + + return json.dumps(out) diff --git a/moto/glue/urls.py b/moto/glue/urls.py new file mode 100644 index 000000000..f3eaa9cad --- /dev/null +++ b/moto/glue/urls.py @@ -0,0 +1,11 @@ +from __future__ import unicode_literals + +from .responses import GlueResponse + +url_bases = [ + "https?://glue(.*).amazonaws.com" +] + +url_paths = { + '{0}/$': GlueResponse.dispatch +} diff --git a/moto/glue/utils.py b/moto/glue/utils.py new file mode 100644 index 000000000..baffc4882 --- /dev/null +++ b/moto/glue/utils.py @@ -0,0 +1 @@ +from __future__ import unicode_literals diff --git a/moto/iam/aws_managed_policies.py b/moto/iam/aws_managed_policies.py index df348c0d9..a8fca28e0 100644 --- a/moto/iam/aws_managed_policies.py +++ b/moto/iam/aws_managed_policies.py @@ -1,6 +1,49 @@ # Imported via `make aws_managed_policies` aws_managed_policies_data = """ { + "APIGatewayServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/APIGatewayServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-10-20T17:23:10+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "elasticloadbalancing:AddListenerCertificates", + "elasticloadbalancing:RemoveListenerCertificates", + "elasticloadbalancing:ModifyListener", + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", + "xray:GetSamplingTargets", + "xray:GetSamplingRules" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "firehose:DescribeDeliveryStream", + "firehose:PutRecord", + "firehose:PutRecordBatch" + ], + "Effect": "Allow", + "Resource": "arn:aws:firehose:*:*:deliverystream/amazon-apigateway-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJQQDZNLDBF2ULTWK6", + "PolicyName": "APIGatewayServiceRolePolicy", + "UpdateDate": "2019-05-20T18:22:18+00:00", + "VersionId": "v4" + }, "AWSAccountActivityAccess": { "Arn": "arn:aws:iam::aws:policy/AWSAccountActivityAccess", "AttachmentCount": 0, @@ -21,6 +64,7 @@ aws_managed_policies_data = """ 
"IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJQRYCWMFX5J3E333K", "PolicyName": "AWSAccountActivityAccess", "UpdateDate": "2015-02-06T18:41:18+00:00", @@ -46,6 +90,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJLIB4VSBVO47ZSBB6", "PolicyName": "AWSAccountUsageReportAccess", "UpdateDate": "2015-02-06T18:41:19+00:00", @@ -127,11 +172,499 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIA3DIL7BYQ35ISM4K", "PolicyName": "AWSAgentlessDiscoveryService", "UpdateDate": "2016-08-02T01:35:11+00:00", "VersionId": "v1" }, + "AWSAppMeshFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSAppMeshFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-04-16T17:50:40+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "appmesh:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4ILVZ5BWFU", + "PolicyName": "AWSAppMeshFullAccess", + "UpdateDate": "2019-04-16T17:50:40+00:00", + "VersionId": "v1" + }, + "AWSAppMeshReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AWSAppMeshReadOnly", + "AttachmentCount": 0, + "CreateDate": "2019-04-16T17:51:11+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "appmesh:Describe*", + "appmesh:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4HOPFCIWXP", + "PolicyName": "AWSAppMeshReadOnly", + "UpdateDate": "2019-04-16T17:51:11+00:00", + "VersionId": "v1" + }, + "AWSAppMeshServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSAppMeshServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2019-06-03T18:30:51+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "servicediscovery:DiscoverInstances" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "CloudMapServiceDiscovery" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4B5IHMMEND", + "PolicyName": "AWSAppMeshServiceRolePolicy", + "UpdateDate": "2019-06-03T18:30:51+00:00", + "VersionId": "v1" + }, + "AWSAppSyncAdministrator": { + "Arn": "arn:aws:iam::aws:policy/AWSAppSyncAdministrator", + "AttachmentCount": 0, + "CreateDate": "2018-03-20T21:20:28+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "appsync:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": [ + "appsync.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJBYY36AJPXTTWIXCY", + "PolicyName": "AWSAppSyncAdministrator", + "UpdateDate": "2018-03-20T21:20:28+00:00", + "VersionId": "v1" + }, + "AWSAppSyncInvokeFullAccess": { + "Arn": 
"arn:aws:iam::aws:policy/AWSAppSyncInvokeFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-03-20T21:21:20+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "appsync:GraphQL", + "appsync:GetGraphqlApi", + "appsync:ListGraphqlApis", + "appsync:ListApiKeys" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAILMPWRRZN27MPE3VM", + "PolicyName": "AWSAppSyncInvokeFullAccess", + "UpdateDate": "2018-03-20T21:21:20+00:00", + "VersionId": "v1" + }, + "AWSAppSyncPushToCloudWatchLogs": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSAppSyncPushToCloudWatchLogs", + "AttachmentCount": 0, + "CreateDate": "2018-04-09T19:38:55+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIWN7WNO34HLMJPUQS", + "PolicyName": "AWSAppSyncPushToCloudWatchLogs", + "UpdateDate": "2018-04-09T19:38:55+00:00", + "VersionId": "v1" + }, + "AWSAppSyncSchemaAuthor": { + "Arn": "arn:aws:iam::aws:policy/AWSAppSyncSchemaAuthor", + "AttachmentCount": 0, + "CreateDate": "2018-03-20T21:21:06+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "appsync:GraphQL", + "appsync:CreateResolver", + "appsync:CreateType", + "appsync:DeleteResolver", + "appsync:DeleteType", + "appsync:GetResolver", + "appsync:GetType", + "appsync:GetDataSource", + "appsync:GetSchemaCreationStatus", + "appsync:GetIntrospectionSchema", + "appsync:GetGraphqlApi", + "appsync:ListTypes", + "appsync:ListApiKeys", + "appsync:ListResolvers", + "appsync:ListDataSources", + "appsync:ListGraphqlApis", + "appsync:StartSchemaCreation", + "appsync:UpdateResolver", + "appsync:UpdateType" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIUCF5WVTOFQXFKY5E", + "PolicyName": "AWSAppSyncSchemaAuthor", + "UpdateDate": "2018-03-20T21:21:06+00:00", + "VersionId": "v1" + }, + "AWSApplicationAutoScalingCustomResourcePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSApplicationAutoScalingCustomResourcePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-06-04T23:22:44+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "execute-api:Invoke", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJYTKXPX6DO32Z4XXA", + "PolicyName": "AWSApplicationAutoScalingCustomResourcePolicy", + "UpdateDate": "2018-06-04T23:22:44+00:00", + "VersionId": "v1" + }, + "AWSApplicationAutoscalingAppStreamFleetPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSApplicationAutoscalingAppStreamFleetPolicy", + "AttachmentCount": 0, + "CreateDate": "2017-10-20T19:04:06+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + 
"appstream:UpdateFleet", + "appstream:DescribeFleets", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIRI724OWKP56ZG62M", + "PolicyName": "AWSApplicationAutoscalingAppStreamFleetPolicy", + "UpdateDate": "2017-10-20T19:04:06+00:00", + "VersionId": "v1" + }, + "AWSApplicationAutoscalingDynamoDBTablePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSApplicationAutoscalingDynamoDBTablePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-10-20T21:34:57+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "dynamodb:DescribeTable", + "dynamodb:UpdateTable", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJOVQMDI3JFCBW4LFO", + "PolicyName": "AWSApplicationAutoscalingDynamoDBTablePolicy", + "UpdateDate": "2017-10-20T21:34:57+00:00", + "VersionId": "v1" + }, + "AWSApplicationAutoscalingEC2SpotFleetRequestPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSApplicationAutoscalingEC2SpotFleetRequestPolicy", + "AttachmentCount": 0, + "CreateDate": "2017-10-25T18:23:27+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeSpotFleetRequests", + "ec2:ModifySpotFleetRequest", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJNRH3VE3WW4Q4RDTU", + "PolicyName": "AWSApplicationAutoscalingEC2SpotFleetRequestPolicy", + "UpdateDate": "2017-10-25T18:23:27+00:00", + "VersionId": "v1" + }, + "AWSApplicationAutoscalingECSServicePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSApplicationAutoscalingECSServicePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-10-25T23:53:08+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ecs:DescribeServices", + "ecs:UpdateService", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJFXLLV7AKH5PSFOYG", + "PolicyName": "AWSApplicationAutoscalingECSServicePolicy", + "UpdateDate": "2017-10-25T23:53:08+00:00", + "VersionId": "v1" + }, + "AWSApplicationAutoscalingEMRInstanceGroupPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSApplicationAutoscalingEMRInstanceGroupPolicy", + "AttachmentCount": 0, + "CreateDate": "2017-10-26T00:57:39+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "elasticmapreduce:ListInstanceGroups", + "elasticmapreduce:ModifyInstanceGroups", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms" + ], + "Effect": "Allow", + "Resource": "*" + } 
+ ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIQ6M5Z7LQY2YSG2JS", + "PolicyName": "AWSApplicationAutoscalingEMRInstanceGroupPolicy", + "UpdateDate": "2017-10-26T00:57:39+00:00", + "VersionId": "v1" + }, + "AWSApplicationAutoscalingRDSClusterPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSApplicationAutoscalingRDSClusterPolicy", + "AttachmentCount": 0, + "CreateDate": "2017-10-17T17:46:56+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "rds:AddTagsToResource", + "rds:CreateDBInstance", + "rds:DeleteDBInstance", + "rds:DescribeDBClusters", + "rds:DescribeDBInstances", + "rds:ModifyDBCluster", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": "rds.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ7XS52I27Q2JVKALU", + "PolicyName": "AWSApplicationAutoscalingRDSClusterPolicy", + "UpdateDate": "2018-08-07T19:14:24+00:00", + "VersionId": "v3" + }, + "AWSApplicationAutoscalingSageMakerEndpointPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSApplicationAutoscalingSageMakerEndpointPolicy", + "AttachmentCount": 0, + "CreateDate": "2018-02-06T19:58:21+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sagemaker:DescribeEndpoint", + "sagemaker:DescribeEndpointConfig", + "sagemaker:UpdateEndpointWeightsAndCapacities", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI5DBEBNRZQ4SXYTAW", + "PolicyName": "AWSApplicationAutoscalingSageMakerEndpointPolicy", + "UpdateDate": "2018-02-06T19:58:21+00:00", + "VersionId": "v1" + }, "AWSApplicationDiscoveryAgentAccess": { "Arn": "arn:aws:iam::aws:policy/AWSApplicationDiscoveryAgentAccess", "AttachmentCount": 0, @@ -152,6 +685,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAICZIOVAGC6JPF3WHC", "PolicyName": "AWSApplicationDiscoveryAgentAccess", "UpdateDate": "2016-05-11T21:38:47+00:00", @@ -161,11 +695,157 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AWSApplicationDiscoveryServiceFullAccess", "AttachmentCount": 0, "CreateDate": "2016-05-11T21:30:50+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "mgh:*", + "discovery:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:GetRole" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "continuousexport.discovery.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/continuousexport.discovery.amazonaws.com/AWSServiceRoleForApplicationDiscoveryServiceContinuousExport*" + }, + { + "Action": [ + 
"iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/continuousexport.discovery.amazonaws.com/AWSServiceRoleForApplicationDiscoveryServiceContinuousExport*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJBNJEA6ZXM2SBOPDU", + "PolicyName": "AWSApplicationDiscoveryServiceFullAccess", + "UpdateDate": "2018-08-16T16:02:27+00:00", + "VersionId": "v3" + }, + "AWSArtifactAccountSync": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSArtifactAccountSync", + "AttachmentCount": 0, + "CreateDate": "2018-04-10T23:04:33+00:00", "DefaultVersionId": "v1", "Document": { "Statement": [ { - "Action": "discovery:*", + "Action": [ + "organizations:ListAccounts", + "organizations:DescribeOrganization" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJMVPXRWZJZWDTYDNC", + "PolicyName": "AWSArtifactAccountSync", + "UpdateDate": "2018-04-10T23:04:33+00:00", + "VersionId": "v1" + }, + "AWSAutoScalingPlansEC2AutoScalingPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSAutoScalingPlansEC2AutoScalingPolicy", + "AttachmentCount": 0, + "CreateDate": "2018-08-23T22:46:59+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:GetMetricData", + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeScheduledActions", + "autoscaling:BatchPutScheduledUpdateGroupAction", + "autoscaling:BatchDeleteScheduledAction" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIXWLPZPD4RYBM3JSU", + "PolicyName": "AWSAutoScalingPlansEC2AutoScalingPolicy", + "UpdateDate": "2018-08-23T22:46:59+00:00", + "VersionId": "v1" + }, + "AWSB9InternalServicePolicy": { + "Arn": "arn:aws:iam::aws:policy/AWSB9InternalServicePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-12-13T18:48:22+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterfacePermission", + "ec2:DescribeNetworkInterfaces", + "ec2:DeleteNetworkInterface", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeSecurityGroups", + "greengrass:CreateDeployment", + "greengrass:CreateGroupVersion", + "greengrass:CreateFunctionDefinition", + "greengrass:CreateFunctionDefinitionVersion", + "greengrass:GetDeploymentStatus", + "greengrass:GetGroup", + "greengrass:GetGroupVersion", + "greengrass:GetCoreDefinitionVersion", + "greengrass:GetFunctionDefinitionVersion", + "greengrass:GetAssociatedRole", + "lambda:CreateFunction" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "lambda:UpdateFunctionCode", + "lambda:GetFunction", + "lambda:UpdateFunctionConfiguration" + ], + "Effect": "Allow", + "Resource": "arn:aws:lambda:*:*:function:aws-robomaker-*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringEqualsIfExists": { + "iam:PassedToService": "lambda.amazonaws.com" + } + }, "Effect": "Allow", "Resource": "*" } @@ -175,16 +855,532 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", - "PolicyId": "ANPAJBNJEA6ZXM2SBOPDU", - 
"PolicyName": "AWSApplicationDiscoveryServiceFullAccess", - "UpdateDate": "2016-05-11T21:30:50+00:00", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIWR2IIOQ7JJGVQOPW", + "PolicyName": "AWSB9InternalServicePolicy", + "UpdateDate": "2018-12-13T18:48:22+00:00", "VersionId": "v1" }, + "AWSBackupAdminPolicy": { + "Arn": "arn:aws:iam::aws:policy/AWSBackupAdminPolicy", + "AttachmentCount": 0, + "CreateDate": "2019-01-19T02:34:31+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": "backup:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "backup-storage:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "rds:DescribeDBSnapshots", + "rds:ListTagsForResource", + "rds:DescribeDBInstances", + "rds:describeDBSnapshots", + "rds:describeDBEngineVersions", + "rds:describeOptionGroups", + "rds:describeOrderableDBInstanceOptions", + "rds:describeDBSubnetGroups" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "dynamodb:ListBackups", + "dynamodb:ListTables" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "elasticfilesystem:DescribeFilesystems" + ], + "Effect": "Allow", + "Resource": "arn:aws:elasticfilesystem:*:*:file-system/*" + }, + { + "Action": [ + "ec2:DescribeSnapshots", + "ec2:DescribeVolumes", + "ec2:describeAvailabilityZones" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "tag:GetTagKeys", + "tag:GetTagValues", + "tag:GetResources" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "storagegateway:DescribeCachediSCSIVolumes", + "storagegateway:DescribeStorediSCSIVolumes" + ], + "Effect": "Allow", + "Resource": "arn:aws:storagegateway:*:*:gateway/*/volume/*" + }, + { + "Action": [ + "storagegateway:ListGateways" + ], + "Effect": "Allow", + "Resource": "arn:aws:storagegateway:*:*:*" + }, + { + "Action": [ + "storagegateway:DescribeGatewayInformation", + "storagegateway:ListVolumes", + "storagegateway:ListLocalDisks" + ], + "Effect": "Allow", + "Resource": "arn:aws:storagegateway:*:*:gateway/*" + }, + { + "Action": [ + "iam:ListRoles", + "iam:GetRole", + "iam:GetUser" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:PassedToService": "backup.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/*AwsBackup*", + "arn:aws:iam::*:role/*AWSBackup*" + ] + }, + { + "Action": [ + "kms:ListKeys", + "kms:DescribeKey", + "kms:GenerateDataKey", + "kms:RetireGrant", + "kms:CreateGrant", + "kms:ListAliases", + "kms:Decrypt" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJWFPFHACTI7XN6M2C", + "PolicyName": "AWSBackupAdminPolicy", + "UpdateDate": "2019-03-11T22:14:30+00:00", + "VersionId": "v2" + }, + "AWSBackupOperatorPolicy": { + "Arn": "arn:aws:iam::aws:policy/AWSBackupOperatorPolicy", + "AttachmentCount": 0, + "CreateDate": "2019-01-19T02:31:55+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "backup:Get*", + "backup:List*", + "backup:Describe*", + "backup:CreateBackupSelection", + "backup:DeleteBackupSelection", + "backup:GetRecoveryPointRestoreMetadata", + "backup:StartBackupJob", + "backup:StartRestoreJob" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "rds:DescribeDBSnapshots", + "rds:ListTagsForResource", + "rds:DescribeDBInstances", 
+ "rds:describeDBSnapshots", + "rds:describeDBEngineVersions", + "rds:describeOptionGroups", + "rds:describeOrderableDBInstanceOptions", + "rds:describeDBSubnetGroups" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "dynamodb:ListBackups", + "dynamodb:ListTables" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "elasticfilesystem:DescribeFilesystems" + ], + "Effect": "Allow", + "Resource": "arn:aws:elasticfilesystem:*:*:file-system/*" + }, + { + "Action": [ + "ec2:DescribeSnapshots", + "ec2:DescribeVolumes", + "ec2:describeAvailabilityZones" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "tag:GetTagKeys", + "tag:GetTagValues", + "tag:GetResources" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "storagegateway:DescribeCachediSCSIVolumes", + "storagegateway:DescribeStorediSCSIVolumes" + ], + "Effect": "Allow", + "Resource": "arn:aws:storagegateway:*:*:gateway/*/volume/*" + }, + { + "Action": [ + "storagegateway:ListGateways" + ], + "Effect": "Allow", + "Resource": "arn:aws:storagegateway:*:*:*" + }, + { + "Action": [ + "storagegateway:DescribeGatewayInformation", + "storagegateway:ListVolumes", + "storagegateway:ListLocalDisks" + ], + "Effect": "Allow", + "Resource": "arn:aws:storagegateway:*:*:gateway/*" + }, + { + "Action": [ + "iam:ListRoles", + "iam:GetRole", + "iam:GetUser" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:PassedToService": "backup.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/*AwsBackup*", + "arn:aws:iam::*:role/*AWSBackup*" + ] + }, + { + "Action": [ + "kms:ListKeys", + "kms:DescribeKey", + "kms:GenerateDataKey", + "kms:RetireGrant", + "kms:CreateGrant", + "kms:ListAliases", + "kms:Decrypt" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ7BHZKKS47SGORCJE", + "PolicyName": "AWSBackupOperatorPolicy", + "UpdateDate": "2019-03-11T22:18:12+00:00", + "VersionId": "v2" + }, + "AWSBackupServiceRolePolicyForBackup": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup", + "AttachmentCount": 0, + "CreateDate": "2019-01-10T21:01:28+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "dynamodb:DescribeTable", + "dynamodb:CreateBackup" + ], + "Effect": "Allow", + "Resource": "arn:aws:dynamodb:*:*:table/*" + }, + { + "Action": [ + "dynamodb:DescribeBackup", + "dynamodb:DeleteBackup" + ], + "Effect": "Allow", + "Resource": "arn:aws:dynamodb:*:*:table/*/backup/*" + }, + { + "Action": [ + "rds:AddTagsToResource", + "rds:ListTagsForResource", + "rds:DescribeDBSnapshots", + "rds:CreateDBSnapshot", + "rds:CopyDBSnapshot", + "rds:DescribeDBInstances" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "rds:DeleteDBSnapshot" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:rds:*:*:snapshot:awsbackup:*" + ] + }, + { + "Action": [ + "storagegateway:CreateSnapshot" + ], + "Effect": "Allow", + "Resource": "arn:aws:storagegateway:*:*:gateway/*/volume/*" + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteSnapshot" + ], + "Effect": "Allow", + "Resource": "arn:aws:ec2:*::snapshot/*" + }, + { + "Action": [ + "ec2:DescribeSnapshots" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "elasticfilesystem:Backup" + ], + "Effect": "Allow", + 
"Resource": "arn:aws:elasticfilesystem:*:*:file-system/*" + }, + { + "Action": [ + "ec2:CreateSnapshot", + "ec2:DeleteSnapshot", + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*::snapshot/*", + "arn:aws:ec2:*:*:volume/*" + ] + }, + { + "Action": "kms:DescribeKey", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "kms:CreateGrant", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "tag:GetResources" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIOOYZSLZZXWFJJ5N2", + "PolicyName": "AWSBackupServiceRolePolicyForBackup", + "UpdateDate": "2019-04-25T19:15:48+00:00", + "VersionId": "v2" + }, + "AWSBackupServiceRolePolicyForRestores": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForRestores", + "AttachmentCount": 0, + "CreateDate": "2019-01-12T00:23:54+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "dynamodb:Scan", + "dynamodb:Query", + "dynamodb:UpdateItem", + "dynamodb:PutItem", + "dynamodb:GetItem", + "dynamodb:DeleteItem", + "dynamodb:BatchWriteItem", + "dynamodb:DescribeTable" + ], + "Effect": "Allow", + "Resource": "arn:aws:dynamodb:*:*:table/*" + }, + { + "Action": [ + "dynamodb:RestoreTableFromBackup" + ], + "Effect": "Allow", + "Resource": "arn:aws:dynamodb:*:*:table/*/backup/*" + }, + { + "Action": [ + "ec2:CreateVolume", + "ec2:DeleteVolume" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*::snapshot/*", + "arn:aws:ec2:*:*:volume/*" + ] + }, + { + "Action": [ + "ec2:DescribeSnapshots", + "ec2:DescribeVolumes" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "storagegateway:DeleteVolume", + "storagegateway:DescribeCachediSCSIVolumes", + "storagegateway:DescribeStorediSCSIVolumes" + ], + "Effect": "Allow", + "Resource": "arn:aws:storagegateway:*:*:gateway/*/volume/*" + }, + { + "Action": [ + "storagegateway:DescribeGatewayInformation", + "storagegateway:CreateStorediSCSIVolume", + "storagegateway:CreateCachediSCSIVolume" + ], + "Effect": "Allow", + "Resource": "arn:aws:storagegateway:*:*:gateway/*" + }, + { + "Action": [ + "storagegateway:ListVolumes" + ], + "Effect": "Allow", + "Resource": "arn:aws:storagegateway:*:*:*" + }, + { + "Action": [ + "rds:DescribeDBInstances", + "rds:DescribeDBSnapshots", + "rds:ListTagsForResource", + "rds:RestoreDBInstanceFromDBSnapshot", + "rds:DeleteDBInstance", + "rds:AddTagsToResource" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "elasticfilesystem:Restore", + "elasticfilesystem:CreateFilesystem", + "elasticfilesystem:DescribeFilesystems", + "elasticfilesystem:DeleteFilesystem" + ], + "Effect": "Allow", + "Resource": "arn:aws:elasticfilesystem:*:*:file-system/*" + }, + { + "Action": "kms:DescribeKey", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "kms:CreateGrant", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJZCCL6F2WPVOUXZKI", + "PolicyName": "AWSBackupServiceRolePolicyForRestores", + "UpdateDate": "2019-04-25T19:17:26+00:00", + "VersionId": 
"v3" + }, "AWSBatchFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSBatchFullAccess", "AttachmentCount": 0, - "CreateDate": "2016-12-13T00:38:59+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2016-12-06T19:35:42+00:00", + "DefaultVersionId": "v5", "Document": { "Statement": [ { @@ -194,6 +1390,10 @@ aws_managed_policies_data = """ "ec2:DescribeSubnets", "ec2:DescribeSecurityGroups", "ec2:DescribeKeyPairs", + "ec2:DescribeVpcs", + "ec2:DescribeImages", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", "ecs:DescribeClusters", "ecs:Describe*", "ecs:List*", @@ -214,7 +1414,9 @@ aws_managed_policies_data = """ "Effect": "Allow", "Resource": [ "arn:aws:iam::*:role/AWSBatchServiceRole", + "arn:aws:iam::*:role/service-role/AWSBatchServiceRole", "arn:aws:iam::*:role/ecsInstanceRole", + "arn:aws:iam::*:instance-profile/ecsInstanceRole", "arn:aws:iam::*:role/iaws-ec2-spot-fleet-role", "arn:aws:iam::*:role/aws-ec2-spot-fleet-role", "arn:aws:iam::*:role/AWSBatchJobRole*" @@ -226,34 +1428,68 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ7K2KIWB3HZVK3CUO", "PolicyName": "AWSBatchFullAccess", - "UpdateDate": "2016-12-13T00:38:59+00:00", - "VersionId": "v2" + "UpdateDate": "2018-11-05T21:09:23+00:00", + "VersionId": "v5" + }, + "AWSBatchServiceEventTargetRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSBatchServiceEventTargetRole", + "AttachmentCount": 0, + "CreateDate": "2018-02-28T22:31:13+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "batch:SubmitJob" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAICVHHZ6XHNMA6VE3Q", + "PolicyName": "AWSBatchServiceEventTargetRole", + "UpdateDate": "2018-02-28T22:31:13+00:00", + "VersionId": "v1" }, "AWSBatchServiceRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole", "AttachmentCount": 0, - "CreateDate": "2017-05-11T20:44:52+00:00", - "DefaultVersionId": "v4", + "CreateDate": "2016-12-06T19:36:24+00:00", + "DefaultVersionId": "v9", "Document": { "Statement": [ { "Action": [ "ec2:DescribeAccountAttributes", "ec2:DescribeInstances", + "ec2:DescribeInstanceAttribute", "ec2:DescribeSubnets", "ec2:DescribeSecurityGroups", "ec2:DescribeKeyPairs", "ec2:DescribeImages", "ec2:DescribeImageAttribute", + "ec2:DescribeSpotInstanceRequests", "ec2:DescribeSpotFleetInstances", "ec2:DescribeSpotFleetRequests", "ec2:DescribeSpotPriceHistory", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeLaunchTemplateVersions", + "ec2:CreateLaunchTemplate", + "ec2:DeleteLaunchTemplate", "ec2:RequestSpotFleet", "ec2:CancelSpotFleetRequests", "ec2:ModifySpotFleetRequest", "ec2:TerminateInstances", + "ec2:RunInstances", "autoscaling:DescribeAccountLimits", "autoscaling:DescribeAutoScalingGroups", "autoscaling:DescribeLaunchConfigurations", @@ -291,10 +1527,54 @@ aws_managed_policies_data = """ "logs:PutLogEvents", "logs:DescribeLogGroups", "iam:GetInstanceProfile", - "iam:PassRole" + "iam:GetRole" ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringEquals": { + "iam:PassedToService": [ + "ec2.amazonaws.com", + "ecs-tasks.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + 
"StringEquals": { + "iam:AWSServiceName": [ + "spot.amazonaws.com", + "spotfleet.amazonaws.com", + "autoscaling.amazonaws.com", + "ecs.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateTags" + ], + "Condition": { + "StringEquals": { + "ec2:CreateAction": "RunInstances" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] } ], "Version": "2012-10-17" @@ -302,10 +1582,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIUETIXPCKASQJURFE", "PolicyName": "AWSBatchServiceRole", - "UpdateDate": "2017-05-11T20:44:52+00:00", - "VersionId": "v4" + "UpdateDate": "2018-10-30T19:00:56+00:00", + "VersionId": "v9" }, "AWSCertificateManagerFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCertificateManagerFullAccess", @@ -327,15 +1608,149 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJYCHABBP6VQIVBCBQ", "PolicyName": "AWSCertificateManagerFullAccess", "UpdateDate": "2016-01-21T17:02:36+00:00", "VersionId": "v1" }, + "AWSCertificateManagerPrivateCAAuditor": { + "Arn": "arn:aws:iam::aws:policy/AWSCertificateManagerPrivateCAAuditor", + "AttachmentCount": 0, + "CreateDate": "2018-10-23T16:51:08+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "acm-pca:CreateCertificateAuthorityAuditReport", + "acm-pca:DescribeCertificateAuthority", + "acm-pca:DescribeCertificateAuthorityAuditReport", + "acm-pca:GetCertificateAuthorityCsr", + "acm-pca:GetCertificateAuthorityCertificate", + "acm-pca:GetCertificate", + "acm-pca:ListPermissions", + "acm-pca:ListTags" + ], + "Effect": "Allow", + "Resource": "arn:aws:acm-pca:*:*:certificate-authority/*" + }, + { + "Action": [ + "acm-pca:ListCertificateAuthorities" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJW77VE4UEBJ4PEXEY", + "PolicyName": "AWSCertificateManagerPrivateCAAuditor", + "UpdateDate": "2019-03-14T17:17:38+00:00", + "VersionId": "v3" + }, + "AWSCertificateManagerPrivateCAFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCertificateManagerPrivateCAFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-10-23T16:54:50+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "acm-pca:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIRTQUC55CREAWFLBG", + "PolicyName": "AWSCertificateManagerPrivateCAFullAccess", + "UpdateDate": "2018-10-23T16:54:50+00:00", + "VersionId": "v1" + }, + "AWSCertificateManagerPrivateCAReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AWSCertificateManagerPrivateCAReadOnly", + "AttachmentCount": 0, + "CreateDate": "2018-10-23T16:57:04+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": { + "Action": [ + "acm-pca:DescribeCertificateAuthority", + "acm-pca:DescribeCertificateAuthorityAuditReport", + "acm-pca:ListCertificateAuthorities", + "acm-pca:GetCertificateAuthorityCsr", + "acm-pca:GetCertificateAuthorityCertificate", + "acm-pca:GetCertificate", + "acm-pca:ListPermissions", + "acm-pca:ListTags" + ], + "Effect": "Allow", + "Resource": "*" + }, + "Version": 
"2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJQAQT3WIXOXY7TD4A", + "PolicyName": "AWSCertificateManagerPrivateCAReadOnly", + "UpdateDate": "2019-03-14T17:17:21+00:00", + "VersionId": "v2" + }, + "AWSCertificateManagerPrivateCAUser": { + "Arn": "arn:aws:iam::aws:policy/AWSCertificateManagerPrivateCAUser", + "AttachmentCount": 0, + "CreateDate": "2018-10-23T16:53:33+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "acm-pca:IssueCertificate", + "acm-pca:RevokeCertificate", + "acm-pca:GetCertificate", + "acm-pca:ListPermissions" + ], + "Effect": "Allow", + "Resource": "arn:aws:acm-pca:*:*:certificate-authority/*" + }, + { + "Action": [ + "acm-pca:ListCertificateAuthorities" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJBXCSJJULLMRWSNII", + "PolicyName": "AWSCertificateManagerPrivateCAUser", + "UpdateDate": "2019-03-14T17:17:02+00:00", + "VersionId": "v3" + }, "AWSCertificateManagerReadOnly": { "Arn": "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly", "AttachmentCount": 0, - "CreateDate": "2016-04-21T15:08:16+00:00", + "CreateDate": "2016-01-21T17:07:33+00:00", "DefaultVersionId": "v2", "Document": { "Statement": { @@ -353,26 +1768,274 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI4GSWX6S4MESJ3EWC", "PolicyName": "AWSCertificateManagerReadOnly", "UpdateDate": "2016-04-21T15:08:16+00:00", "VersionId": "v2" }, - "AWSCloudFormationReadOnlyAccess": { - "Arn": "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess", + "AWSCloud9Administrator": { + "Arn": "arn:aws:iam::aws:policy/AWSCloud9Administrator", "AttachmentCount": 0, - "CreateDate": "2015-02-06T18:39:49+00:00", + "CreateDate": "2017-11-30T16:17:28+00:00", "DefaultVersionId": "v1", "Document": { "Statement": [ { "Action": [ + "cloud9:*", + "iam:GetUser", + "iam:ListUsers", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringLike": { + "iam:AWSServiceName": "cloud9.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIQ4KWP455WDTCBGWK", + "PolicyName": "AWSCloud9Administrator", + "UpdateDate": "2017-11-30T16:17:28+00:00", + "VersionId": "v1" + }, + "AWSCloud9EnvironmentMember": { + "Arn": "arn:aws:iam::aws:policy/AWSCloud9EnvironmentMember", + "AttachmentCount": 0, + "CreateDate": "2017-11-30T16:18:28+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloud9:GetUserSettings", + "cloud9:UpdateUserSettings", + "iam:GetUser", + "iam:ListUsers" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloud9:DescribeEnvironmentMemberships" + ], + "Condition": { + "Null": { + "cloud9:EnvironmentId": "true", + "cloud9:UserArn": "true" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI54ULAIPVT5HFTYGK", + "PolicyName": 
"AWSCloud9EnvironmentMember", + "UpdateDate": "2017-11-30T16:18:28+00:00", + "VersionId": "v1" + }, + "AWSCloud9ServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSCloud9ServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-11-30T13:44:08+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:RunInstances", + "ec2:CreateSecurityGroup", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeInstances", + "cloudformation:CreateStack", "cloudformation:DescribeStacks", "cloudformation:DescribeStackEvents", - "cloudformation:DescribeStackResource", - "cloudformation:DescribeStackResources", - "cloudformation:GetTemplate", - "cloudformation:List*" + "cloudformation:DescribeStackResources" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:TerminateInstances", + "ec2:DeleteSecurityGroup", + "ec2:AuthorizeSecurityGroupIngress" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudformation:DeleteStack" + ], + "Effect": "Allow", + "Resource": "arn:aws:cloudformation:*:*:stack/aws-cloud9-*" + }, + { + "Action": [ + "ec2:CreateTags" + ], + "Condition": { + "StringLike": { + "aws:RequestTag/Name": "aws-cloud9-*" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:ec2:*:*:instance/*" + }, + { + "Action": [ + "ec2:StartInstances", + "ec2:StopInstances" + ], + "Condition": { + "StringLike": { + "ec2:ResourceTag/aws:cloudformation:stack-name": "aws-cloud9-*" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJFXGCBXQIZATFZ4YG", + "PolicyName": "AWSCloud9ServiceRolePolicy", + "UpdateDate": "2018-02-27T10:20:24+00:00", + "VersionId": "v2" + }, + "AWSCloud9User": { + "Arn": "arn:aws:iam::aws:policy/AWSCloud9User", + "AttachmentCount": 0, + "CreateDate": "2017-11-30T16:16:17+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "cloud9:ValidateEnvironmentName", + "cloud9:UpdateUserSettings", + "cloud9:GetUserSettings", + "iam:GetUser", + "iam:ListUsers", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloud9:CreateEnvironmentEC2", + "cloud9:CreateEnvironmentSSH" + ], + "Condition": { + "Null": { + "cloud9:OwnerArn": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloud9:GetUserPublicKey" + ], + "Condition": { + "Null": { + "cloud9:UserArn": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloud9:DescribeEnvironmentMemberships" + ], + "Condition": { + "Null": { + "cloud9:EnvironmentId": "true", + "cloud9:UserArn": "true" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringLike": { + "iam:AWSServiceName": "cloud9.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJPFGFWQF67QVARP6U", + "PolicyName": "AWSCloud9User", + "UpdateDate": "2018-07-02T08:46:37+00:00", + "VersionId": "v3" + }, + "AWSCloudFormationReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCloudFormationReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": 
"2015-02-06T18:39:49+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "cloudformation:Describe*", + "cloudformation:EstimateTemplateCost", + "cloudformation:Get*", + "cloudformation:List*", + "cloudformation:ValidateTemplate", + "cloudformation:DetectStackDrift", + "cloudformation:DetectStackResourceDrift" ], "Effect": "Allow", "Resource": "*" @@ -383,9 +2046,38 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJWVBEE4I2POWLODLW", "PolicyName": "AWSCloudFormationReadOnlyAccess", - "UpdateDate": "2015-02-06T18:39:49+00:00", + "UpdateDate": "2019-02-06T22:16:02+00:00", + "VersionId": "v3" + }, + "AWSCloudFrontLogger": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSCloudFrontLogger", + "AttachmentCount": 0, + "CreateDate": "2018-06-12T20:15:23+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:/aws/cloudfront/*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIOI7RPKLCNINBTRP4", + "PolicyName": "AWSCloudFrontLogger", + "UpdateDate": "2018-06-12T20:15:23+00:00", "VersionId": "v1" }, "AWSCloudHSMFullAccess": { @@ -406,6 +2098,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIMBQYQZM7F63DA2UU", "PolicyName": "AWSCloudHSMFullAccess", "UpdateDate": "2015-02-06T18:39:51+00:00", @@ -433,6 +2126,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAISVCBSY7YDBOT67KE", "PolicyName": "AWSCloudHSMReadOnlyAccess", "UpdateDate": "2015-02-06T18:39:52+00:00", @@ -467,16 +2161,153 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI7QIUU4GC66SF26WE", "PolicyName": "AWSCloudHSMRole", "UpdateDate": "2015-02-06T18:41:23+00:00", "VersionId": "v1" }, + "AWSCloudMapDiscoverInstanceAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCloudMapDiscoverInstanceAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-29T00:02:42+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "servicediscovery:DiscoverInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIPRD7PYYQVYPDME4K", + "PolicyName": "AWSCloudMapDiscoverInstanceAccess", + "UpdateDate": "2018-11-29T00:02:42+00:00", + "VersionId": "v1" + }, + "AWSCloudMapFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCloudMapFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T23:57:31+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "route53:GetHostedZone", + "route53:ListHostedZonesByName", + "route53:CreateHostedZone", + "route53:DeleteHostedZone", + "route53:ChangeResourceRecordSets", + "route53:CreateHealthCheck", + "route53:GetHealthCheck", + "route53:DeleteHealthCheck", + "route53:UpdateHealthCheck", + "ec2:DescribeVpcs", + "ec2:DescribeRegions", + 
"servicediscovery:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIZPIMAQZJS3WUXUJM", + "PolicyName": "AWSCloudMapFullAccess", + "UpdateDate": "2018-11-28T23:57:31+00:00", + "VersionId": "v1" + }, + "AWSCloudMapReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCloudMapReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T23:45:26+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "servicediscovery:Get*", + "servicediscovery:List*", + "servicediscovery:DiscoverInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIOHISHKLCJTVQQL5E", + "PolicyName": "AWSCloudMapReadOnlyAccess", + "UpdateDate": "2018-11-28T23:45:26+00:00", + "VersionId": "v1" + }, + "AWSCloudMapRegisterInstanceAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSCloudMapRegisterInstanceAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-29T00:04:57+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "route53:GetHostedZone", + "route53:ListHostedZonesByName", + "route53:ChangeResourceRecordSets", + "route53:CreateHealthCheck", + "route53:GetHealthCheck", + "route53:DeleteHealthCheck", + "route53:UpdateHealthCheck", + "servicediscovery:Get*", + "servicediscovery:List*", + "servicediscovery:RegisterInstance", + "servicediscovery:DeregisterInstance", + "servicediscovery:DiscoverInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI4P5Z5HXVWJ75WQBC", + "PolicyName": "AWSCloudMapRegisterInstanceAccess", + "UpdateDate": "2018-11-29T00:04:57+00:00", + "VersionId": "v1" + }, "AWSCloudTrailFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCloudTrailFullAccess", "AttachmentCount": 0, - "CreateDate": "2016-02-16T18:31:28+00:00", - "DefaultVersionId": "v4", + "CreateDate": "2015-02-06T18:39:58+00:00", + "DefaultVersionId": "v7", "Document": { "Statement": [ { @@ -534,6 +2365,13 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": [ + "lambda:ListFunctions" + ], + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -541,16 +2379,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIQNUJTQYDRJPC3BNK", "PolicyName": "AWSCloudTrailFullAccess", - "UpdateDate": "2016-02-16T18:31:28+00:00", - "VersionId": "v4" + "UpdateDate": "2019-05-21T23:39:06+00:00", + "VersionId": "v7" }, "AWSCloudTrailReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCloudTrailReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2016-12-14T20:41:52+00:00", - "DefaultVersionId": "v6", + "CreateDate": "2015-02-06T18:39:59+00:00", + "DefaultVersionId": "v7", "Document": { "Statement": [ { @@ -570,7 +2409,8 @@ aws_managed_policies_data = """ "cloudtrail:ListPublicKeys", "cloudtrail:GetEventSelectors", "s3:ListAllMyBuckets", - "kms:ListAliases" + "kms:ListAliases", + "lambda:ListFunctions" ], "Effect": "Allow", "Resource": "*" @@ -581,16 +2421,17 @@ aws_managed_policies_data = """ "IsAttachable": true, 
"IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJDU7KJADWBSEQ3E7S", "PolicyName": "AWSCloudTrailReadOnlyAccess", - "UpdateDate": "2016-12-14T20:41:52+00:00", - "VersionId": "v6" + "UpdateDate": "2017-12-11T19:51:37+00:00", + "VersionId": "v7" }, "AWSCodeBuildAdminAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCodeBuildAdminAccess", "AttachmentCount": 0, "CreateDate": "2016-12-01T19:04:44+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v6", "Document": { "Statement": [ { @@ -601,8 +2442,22 @@ aws_managed_policies_data = """ "codecommit:GetRepository", "codecommit:ListBranches", "codecommit:ListRepositories", + "cloudwatch:GetMetricStatistics", + "ec2:DescribeVpcs", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", "ecr:DescribeRepositories", "ecr:ListImages", + "events:DeleteRule", + "events:DescribeRule", + "events:DisableRule", + "events:EnableRule", + "events:ListTargetsByRule", + "events:ListRuleNamesByTarget", + "events:PutRule", + "events:PutTargets", + "events:RemoveTargets", + "logs:GetLogEvents", "s3:GetBucketLocation", "s3:ListAllMyBuckets" ], @@ -611,10 +2466,17 @@ aws_managed_policies_data = """ }, { "Action": [ - "logs:GetLogEvents" + "logs:DeleteLogGroup" ], "Effect": "Allow", "Resource": "arn:aws:logs:*:*:log-group:/aws/codebuild/*:log-stream:*" + }, + { + "Action": [ + "ssm:PutParameter" + ], + "Effect": "Allow", + "Resource": "arn:aws:ssm:*:*:parameter/CodeBuild/*" } ], "Version": "2012-10-17" @@ -622,16 +2484,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJQJGIOIE3CD2TQXDS", "PolicyName": "AWSCodeBuildAdminAccess", - "UpdateDate": "2016-12-01T19:04:44+00:00", - "VersionId": "v1" + "UpdateDate": "2018-11-15T21:21:56+00:00", + "VersionId": "v6" }, "AWSCodeBuildDeveloperAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCodeBuildDeveloperAccess", "AttachmentCount": 0, "CreateDate": "2016-12-01T19:02:32+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -645,6 +2508,11 @@ aws_managed_policies_data = """ "codecommit:GetCommit", "codecommit:GetRepository", "codecommit:ListBranches", + "cloudwatch:GetMetricStatistics", + "events:DescribeRule", + "events:ListTargetsByRule", + "events:ListRuleNamesByTarget", + "logs:GetLogEvents", "s3:GetBucketLocation", "s3:ListAllMyBuckets" ], @@ -653,10 +2521,10 @@ aws_managed_policies_data = """ }, { "Action": [ - "logs:GetLogEvents" + "ssm:PutParameter" ], "Effect": "Allow", - "Resource": "arn:aws:logs:*:*:log-group:/aws/codebuild/*:log-stream:*" + "Resource": "arn:aws:ssm:*:*:parameter/CodeBuild/*" } ], "Version": "2012-10-17" @@ -664,16 +2532,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIMKTMR34XSBQW45HS", "PolicyName": "AWSCodeBuildDeveloperAccess", - "UpdateDate": "2016-12-01T19:02:32+00:00", - "VersionId": "v1" + "UpdateDate": "2018-11-15T21:32:53+00:00", + "VersionId": "v4" }, "AWSCodeBuildReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCodeBuildReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2016-12-01T19:03:41+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -683,17 +2552,15 @@ aws_managed_policies_data = """ "codebuild:List*", "codecommit:GetBranch", "codecommit:GetCommit", - "codecommit:GetRepository" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - 
"Action": [ + "codecommit:GetRepository", + "cloudwatch:GetMetricStatistics", + "events:DescribeRule", + "events:ListTargetsByRule", + "events:ListRuleNamesByTarget", "logs:GetLogEvents" ], "Effect": "Allow", - "Resource": "arn:aws:logs:*:*:log-group:/aws/codebuild/*:log-stream:*" + "Resource": "*" } ], "Version": "2012-10-17" @@ -701,16 +2568,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJIZZWN6557F5HVP2K", "PolicyName": "AWSCodeBuildReadOnlyAccess", - "UpdateDate": "2016-12-01T19:03:41+00:00", - "VersionId": "v1" + "UpdateDate": "2018-11-15T21:38:34+00:00", + "VersionId": "v3" }, "AWSCodeCommitFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCodeCommitFullAccess", "AttachmentCount": 0, "CreateDate": "2015-07-09T17:02:19+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -719,6 +2587,94 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": [ + "events:DeleteRule", + "events:DescribeRule", + "events:DisableRule", + "events:EnableRule", + "events:PutRule", + "events:PutTargets", + "events:RemoveTargets", + "events:ListTargetsByRule" + ], + "Effect": "Allow", + "Resource": "arn:aws:events:*:*:rule/codecommit*", + "Sid": "CloudWatchEventsCodeCommitRulesAccess" + }, + { + "Action": [ + "sns:CreateTopic", + "sns:DeleteTopic", + "sns:Subscribe", + "sns:Unsubscribe", + "sns:SetTopicAttributes" + ], + "Effect": "Allow", + "Resource": "arn:aws:sns:*:*:codecommit*", + "Sid": "SNSTopicAndSubscriptionAccess" + }, + { + "Action": [ + "sns:ListTopics", + "sns:ListSubscriptionsByTopic", + "sns:GetTopicAttributes" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "SNSTopicAndSubscriptionReadAccess" + }, + { + "Action": [ + "lambda:ListFunctions" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "LambdaReadOnlyListAccess" + }, + { + "Action": [ + "iam:ListUsers" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "IAMReadOnlyListAccess" + }, + { + "Action": [ + "iam:ListAccessKeys", + "iam:ListSSHPublicKeys", + "iam:ListServiceSpecificCredentials", + "iam:ListAccessKeys", + "iam:GetSSHPublicKey" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:user/${aws:username}", + "Sid": "IAMReadOnlyConsoleAccess" + }, + { + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:user/${aws:username}", + "Sid": "IAMUserSSHKeys" + }, + { + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:UpdateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ResetServiceSpecificCredential" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:user/${aws:username}", + "Sid": "IAMSelfManageServiceSpecificCredentials" } ], "Version": "2012-10-17" @@ -726,34 +2682,126 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI4VCZ3XPIZLQ5NZV2", "PolicyName": "AWSCodeCommitFullAccess", - "UpdateDate": "2015-07-09T17:02:19+00:00", - "VersionId": "v1" + "UpdateDate": "2017-11-20T20:04:31+00:00", + "VersionId": "v2" }, "AWSCodeCommitPowerUser": { "Arn": "arn:aws:iam::aws:policy/AWSCodeCommitPowerUser", "AttachmentCount": 0, - "CreateDate": "2017-05-22T21:12:48+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2015-07-09T17:06:49+00:00", + "DefaultVersionId": 
"v6", "Document": { "Statement": [ { "Action": [ - "codecommit:BatchGetRepositories", - "codecommit:CreateBranch", - "codecommit:CreateRepository", - "codecommit:DeleteBranch", + "codecommit:BatchGet*", + "codecommit:BatchDescribe*", "codecommit:Get*", - "codecommit:GitPull", - "codecommit:GitPush", "codecommit:List*", + "codecommit:Create*", + "codecommit:DeleteBranch", + "codecommit:DeleteFile", + "codecommit:Describe*", "codecommit:Put*", + "codecommit:Post*", + "codecommit:Merge*", + "codecommit:TagResource", "codecommit:Test*", - "codecommit:Update*" + "codecommit:UntagResource", + "codecommit:Update*", + "codecommit:GitPull", + "codecommit:GitPush" ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": [ + "events:DeleteRule", + "events:DescribeRule", + "events:DisableRule", + "events:EnableRule", + "events:PutRule", + "events:PutTargets", + "events:RemoveTargets", + "events:ListTargetsByRule" + ], + "Effect": "Allow", + "Resource": "arn:aws:events:*:*:rule/codecommit*", + "Sid": "CloudWatchEventsCodeCommitRulesAccess" + }, + { + "Action": [ + "sns:Subscribe", + "sns:Unsubscribe" + ], + "Effect": "Allow", + "Resource": "arn:aws:sns:*:*:codecommit*", + "Sid": "SNSTopicAndSubscriptionAccess" + }, + { + "Action": [ + "sns:ListTopics", + "sns:ListSubscriptionsByTopic", + "sns:GetTopicAttributes" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "SNSTopicAndSubscriptionReadAccess" + }, + { + "Action": [ + "lambda:ListFunctions" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "LambdaReadOnlyListAccess" + }, + { + "Action": [ + "iam:ListUsers" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "IAMReadOnlyListAccess" + }, + { + "Action": [ + "iam:ListAccessKeys", + "iam:ListSSHPublicKeys", + "iam:ListServiceSpecificCredentials", + "iam:ListAccessKeys", + "iam:GetSSHPublicKey" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:user/${aws:username}", + "Sid": "IAMReadOnlyConsoleAccess" + }, + { + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:user/${aws:username}", + "Sid": "IAMUserSSHKeys" + }, + { + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:UpdateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ResetServiceSpecificCredential" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:user/${aws:username}", + "Sid": "IAMSelfManageServiceSpecificCredentials" } ], "Version": "2012-10-17" @@ -761,27 +2809,77 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI4UIINUVGB5SEC57G", "PolicyName": "AWSCodeCommitPowerUser", - "UpdateDate": "2017-05-22T21:12:48+00:00", - "VersionId": "v3" + "UpdateDate": "2019-05-30T19:37:08+00:00", + "VersionId": "v6" }, "AWSCodeCommitReadOnly": { "Arn": "arn:aws:iam::aws:policy/AWSCodeCommitReadOnly", "AttachmentCount": 0, "CreateDate": "2015-07-09T17:05:06+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { "Action": [ - "codecommit:BatchGetRepositories", + "codecommit:BatchGet*", + "codecommit:BatchDescribe*", "codecommit:Get*", - "codecommit:GitPull", - "codecommit:List*" + "codecommit:Describe*", + "codecommit:List*", + "codecommit:GitPull" ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": [ + "events:DescribeRule", + "events:ListTargetsByRule" + ], + "Effect": "Allow", + "Resource": 
"arn:aws:events:*:*:rule/codecommit*", + "Sid": "CloudWatchEventsCodeCommitRulesReadOnlyAccess" + }, + { + "Action": [ + "sns:ListTopics", + "sns:ListSubscriptionsByTopic", + "sns:GetTopicAttributes" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "SNSSubscriptionAccess" + }, + { + "Action": [ + "lambda:ListFunctions" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "LambdaReadOnlyListAccess" + }, + { + "Action": [ + "iam:ListUsers" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "IAMReadOnlyListAccess" + }, + { + "Action": [ + "iam:ListAccessKeys", + "iam:ListSSHPublicKeys", + "iam:ListServiceSpecificCredentials", + "iam:ListAccessKeys", + "iam:GetSSHPublicKey" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:user/${aws:username}", + "Sid": "IAMReadOnlyConsoleAccess" } ], "Version": "2012-10-17" @@ -789,10 +2887,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJACNSXR7Z2VLJW3D6", "PolicyName": "AWSCodeCommitReadOnly", - "UpdateDate": "2015-07-09T17:05:06+00:00", - "VersionId": "v1" + "UpdateDate": "2019-05-15T17:26:42+00:00", + "VersionId": "v3" }, "AWSCodeDeployDeployerAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCodeDeployDeployerAccess", @@ -818,6 +2917,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJUWEPOMGLMVXJAPUI", "PolicyName": "AWSCodeDeployDeployerAccess", "UpdateDate": "2015-05-19T18:18:43+00:00", @@ -841,6 +2941,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIONKN3TJZUKXCHXWC", "PolicyName": "AWSCodeDeployFullAccess", "UpdateDate": "2015-05-19T18:13:23+00:00", @@ -868,6 +2969,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAILZHHKCKB4NE7XOIQ", "PolicyName": "AWSCodeDeployReadOnlyAccess", "UpdateDate": "2015-05-19T18:21:32+00:00", @@ -876,7 +2978,7 @@ aws_managed_policies_data = """ "AWSCodeDeployRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSCodeDeployRole", "AttachmentCount": 0, - "CreateDate": "2017-09-11T19:09:51+00:00", + "CreateDate": "2015-05-04T18:05:37+00:00", "DefaultVersionId": "v6", "Document": { "Statement": [ @@ -931,15 +3033,213 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ2NKMKD73QS5NBFLA", "PolicyName": "AWSCodeDeployRole", "UpdateDate": "2017-09-11T19:09:51+00:00", "VersionId": "v6" }, + "AWSCodeDeployRoleForECS": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeDeployRoleForECS", + "AttachmentCount": 0, + "CreateDate": "2018-11-27T20:40:57+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ecs:DescribeServices", + "ecs:CreateTaskSet", + "ecs:UpdateServicePrimaryTaskSet", + "ecs:DeleteTaskSet", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:ModifyRule", + "lambda:InvokeFunction", + "cloudwatch:DescribeAlarms", + "sns:Publish", + "s3:GetObject", + "s3:GetObjectMetadata", + "s3:GetObjectVersion" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + 
"iam:PassedToService": [ + "ecs-tasks.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIIL3KXEKRGEN2HFIO", + "PolicyName": "AWSCodeDeployRoleForECS", + "UpdateDate": "2018-12-19T17:57:04+00:00", + "VersionId": "v2" + }, + "AWSCodeDeployRoleForECSLimited": { + "Arn": "arn:aws:iam::aws:policy/AWSCodeDeployRoleForECSLimited", + "AttachmentCount": 0, + "CreateDate": "2018-11-27T20:42:42+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ecs:DescribeServices", + "ecs:CreateTaskSet", + "ecs:UpdateServicePrimaryTaskSet", + "ecs:DeleteTaskSet", + "cloudwatch:DescribeAlarms" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "sns:Publish" + ], + "Effect": "Allow", + "Resource": "arn:aws:sns:*:*:CodeDeployTopic_*" + }, + { + "Action": [ + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:ModifyRule" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "lambda:InvokeFunction" + ], + "Effect": "Allow", + "Resource": "arn:aws:lambda:*:*:function:CodeDeployHook_*" + }, + { + "Action": [ + "s3:GetObject", + "s3:GetObjectMetadata", + "s3:GetObjectVersion" + ], + "Condition": { + "StringEquals": { + "s3:ExistingObjectTag/UseWithCodeDeploy": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "ecs-tasks.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/ecsTaskExecutionRole", + "arn:aws:iam::*:role/ECSTaskExecution*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ6Z7L2IOXEFFOGD2M", + "PolicyName": "AWSCodeDeployRoleForECSLimited", + "UpdateDate": "2018-12-19T18:06:16+00:00", + "VersionId": "v2" + }, + "AWSCodeDeployRoleForLambda": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSCodeDeployRoleForLambda", + "AttachmentCount": 0, + "CreateDate": "2017-11-28T14:05:44+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:DescribeAlarms", + "lambda:UpdateAlias", + "lambda:GetAlias", + "sns:Publish" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:GetObject", + "s3:GetObjectVersion" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::*/CodeDeploy/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:GetObjectVersion" + ], + "Condition": { + "StringEquals": { + "s3:ExistingObjectTag/UseWithCodeDeploy": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "lambda:InvokeFunction" + ], + "Effect": "Allow", + "Resource": "arn:aws:lambda:*:*:function:CodeDeployHook_*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJA3RQZIKNOSJ4ZQSA", + "PolicyName": "AWSCodeDeployRoleForLambda", + "UpdateDate": "2017-12-01T22:32:58+00:00", + "VersionId": "v2" + }, "AWSCodePipelineApproverAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCodePipelineApproverAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-02T17:24:58+00:00", + "CreateDate": 
"2016-07-28T18:59:17+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -961,6 +3261,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAICXNWK42SQ6LMDXM2", "PolicyName": "AWSCodePipelineApproverAccess", "UpdateDate": "2017-08-02T17:24:58+00:00", @@ -990,6 +3291,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJFW5Z32BTVF76VCYC", "PolicyName": "AWSCodePipelineCustomActionAccess", "UpdateDate": "2015-07-09T17:02:54+00:00", @@ -998,7 +3300,7 @@ aws_managed_policies_data = """ "AWSCodePipelineFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCodePipelineFullAccess", "AttachmentCount": 0, - "CreateDate": "2016-11-01T19:59:46+00:00", + "CreateDate": "2015-07-09T16:58:07+00:00", "DefaultVersionId": "v5", "Document": { "Statement": [ @@ -1038,6 +3340,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJP5LH77KSAT2KHQGG", "PolicyName": "AWSCodePipelineFullAccess", "UpdateDate": "2016-11-01T19:59:46+00:00", @@ -1046,7 +3349,7 @@ aws_managed_policies_data = """ "AWSCodePipelineReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AWSCodePipelineReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-02T17:25:18+00:00", + "CreateDate": "2015-07-09T16:43:57+00:00", "DefaultVersionId": "v6", "Document": { "Statement": [ @@ -1086,6 +3389,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAILFKZXIBOTNC5TO2Q", "PolicyName": "AWSCodePipelineReadOnlyAccess", "UpdateDate": "2017-08-02T17:25:18+00:00", @@ -1095,7 +3399,7 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AWSCodeStarFullAccess", "AttachmentCount": 0, "CreateDate": "2017-04-19T16:23:19+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -1103,7 +3407,9 @@ aws_managed_policies_data = """ "codestar:*", "ec2:DescribeKeyPairs", "ec2:DescribeVpcs", - "ec2:DescribeSubnets" + "ec2:DescribeSubnets", + "cloud9:DescribeEnvironment*", + "cloud9:ValidateEnvironmentName" ], "Effect": "Allow", "Resource": "*", @@ -1126,27 +3432,47 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIXI233TFUGLZOJBEC", "PolicyName": "AWSCodeStarFullAccess", - "UpdateDate": "2017-04-19T16:23:19+00:00", - "VersionId": "v1" + "UpdateDate": "2018-01-10T21:54:06+00:00", + "VersionId": "v2" }, "AWSCodeStarServiceRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSCodeStarServiceRole", "AttachmentCount": 0, - "CreateDate": "2017-07-13T19:53:22+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2017-04-19T15:20:50+00:00", + "DefaultVersionId": "v9", "Document": { "Statement": [ + { + "Action": [ + "events:PutTargets", + "events:RemoveTargets", + "events:PutRule", + "events:DeleteRule", + "events:DescribeRule" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:events:*:*:rule/awscodestar-*" + ], + "Sid": "ProjectEventRules" + }, { "Action": [ "cloudformation:*Stack*", + "cloudformation:CreateChangeSet", + "cloudformation:ExecuteChangeSet", + "cloudformation:DeleteChangeSet", "cloudformation:GetTemplate" ], "Effect": "Allow", "Resource": [ "arn:aws:cloudformation:*:*:stack/awscodestar-*", - 
"arn:aws:cloudformation:*:*:stack/awseb-*" + "arn:aws:cloudformation:*:*:stack/awseb-*", + "arn:aws:cloudformation:*:*:stack/aws-cloud9-*", + "arn:aws:cloudformation:*:aws:transform/CodeStar*" ], "Sid": "ProjectStack" }, @@ -1184,12 +3510,7 @@ aws_managed_policies_data = """ }, { "Action": [ - "codestar:*Project", - "codestar:*Resource*", - "codestar:List*", - "codestar:Describe*", - "codestar:Get*", - "codestar:AssociateTeamMember", + "codestar:*", "codecommit:*", "codepipeline:*", "codedeploy:*", @@ -1202,7 +3523,11 @@ aws_managed_policies_data = """ "elasticloadbalancing:*", "iam:ListRoles", "logs:*", - "sns:*" + "sns:*", + "cloud9:CreateEnvironmentEC2", + "cloud9:DeleteEnvironment", + "cloud9:DescribeEnvironment*", + "cloud9:ListEnvironments" ], "Effect": "Allow", "Resource": "*", @@ -1217,6 +3542,7 @@ aws_managed_policies_data = """ "iam:DetachRolePolicy", "iam:GetRole", "iam:PassRole", + "iam:GetRolePolicy", "iam:PutRolePolicy", "iam:SetDefaultPolicyVersion", "iam:CreatePolicy", @@ -1257,7 +3583,9 @@ aws_managed_policies_data = """ "iam:CreatePolicyVersion", "iam:DeletePolicyVersion", "iam:ListEntitiesForPolicy", - "iam:ListPolicyVersions" + "iam:ListPolicyVersions", + "iam:GetPolicy", + "iam:GetPolicyVersion" ], "Effect": "Allow", "Resource": [ @@ -1275,6 +3603,29 @@ aws_managed_policies_data = """ "arn:aws:iam::*:role/service-role/aws-codestar-service-role" ], "Sid": "InspectServiceRole" + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "cloud9.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "IAMLinkRole" + }, + { + "Action": [ + "config:DescribeConfigRules" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "DescribeConfigRuleForARN" } ], "Version": "2012-10-17" @@ -1282,16 +3633,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIN6D4M2KD3NBOC4M4", "PolicyName": "AWSCodeStarServiceRole", - "UpdateDate": "2017-07-13T19:53:22+00:00", - "VersionId": "v2" + "UpdateDate": "2019-04-24T19:25:28+00:00", + "VersionId": "v9" }, "AWSConfigRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSConfigRole", "AttachmentCount": 0, - "CreateDate": "2017-08-14T19:04:46+00:00", - "DefaultVersionId": "v10", + "CreateDate": "2015-04-02T17:36:23+00:00", + "DefaultVersionId": "v25", "Document": { "Statement": [ { @@ -1302,7 +3654,11 @@ aws_managed_policies_data = """ "config:Get*", "config:List*", "config:Describe*", + "config:BatchGet*", + "config:Select*", + "cloudtrail:GetEventSelectors", "cloudtrail:GetTrailStatus", + "cloudtrail:ListTags", "s3:GetObject", "iam:GetAccountAuthorizationDetails", "iam:GetAccountPasswordPolicy", @@ -1315,6 +3671,8 @@ aws_managed_policies_data = """ "iam:GetRolePolicy", "iam:GetUser", "iam:GetUserPolicy", + "iam:GenerateCredentialReport", + "iam:GetCredentialReport", "iam:ListAttachedGroupPolicies", "iam:ListAttachedRolePolicies", "iam:ListAttachedUserPolicies", @@ -1325,6 +3683,7 @@ aws_managed_policies_data = """ "iam:ListPolicyVersions", "iam:ListRolePolicies", "iam:ListUserPolicies", + "iam:ListVirtualMFADevices", "elasticloadbalancing:DescribeLoadBalancers", "elasticloadbalancing:DescribeLoadBalancerAttributes", "elasticloadbalancing:DescribeLoadBalancerPolicies", @@ -1354,6 +3713,10 @@ aws_managed_policies_data = """ "s3:GetLifecycleConfiguration", "s3:GetReplicationConfiguration", "s3:ListAllMyBuckets", + "s3:ListBucket", + 
"s3:GetEncryptionConfiguration", + "s3:GetBucketPublicAccessBlock", + "s3:GetAccountPublicAccessBlock", "redshift:DescribeClusterParameterGroups", "redshift:DescribeClusterParameters", "redshift:DescribeClusterSecurityGroups", @@ -1374,7 +3737,29 @@ aws_managed_policies_data = """ "autoscaling:DescribeLifecycleHooks", "autoscaling:DescribePolicies", "autoscaling:DescribeScheduledActions", - "autoscaling:DescribeTags" + "autoscaling:DescribeTags", + "lambda:GetFunction", + "lambda:GetPolicy", + "lambda:ListFunctions", + "lambda:GetAlias", + "lambda:ListAliases", + "waf-regional:GetWebACLForResource", + "waf-regional:GetWebACL", + "cloudfront:ListTagsForResource", + "guardduty:ListDetectors", + "guardduty:GetMasterAccount", + "guardduty:GetDetector", + "codepipeline:ListPipelines", + "codepipeline:GetPipeline", + "codepipeline:GetPipelineState", + "kms:ListKeys", + "kms:GetKeyRotationStatus", + "kms:DescribeKey", + "ssm:DescribeDocument", + "ssm:GetDocument", + "ssm:DescribeAutomationExecutions", + "ssm:GetAutomationExecution", + "shield:DescribeProtection" ], "Effect": "Allow", "Resource": "*" @@ -1385,16 +3770,45 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIQRXRDRGJUA33ELIO", "PolicyName": "AWSConfigRole", - "UpdateDate": "2017-08-14T19:04:46+00:00", - "VersionId": "v10" + "UpdateDate": "2019-05-13T21:29:39+00:00", + "VersionId": "v25" + }, + "AWSConfigRoleForOrganizations": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSConfigRoleForOrganizations", + "AttachmentCount": 0, + "CreateDate": "2018-03-19T22:53:01+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "organizations:ListAccounts", + "organizations:DescribeOrganization", + "organizations:ListAWSServiceAccessForOrganization" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIEHGYAUTHXSXZAW2E", + "PolicyName": "AWSConfigRoleForOrganizations", + "UpdateDate": "2018-03-19T22:53:01+00:00", + "VersionId": "v1" }, "AWSConfigRulesExecutionRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSConfigRulesExecutionRole", "AttachmentCount": 0, "CreateDate": "2016-03-25T17:59:36+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -1409,7 +3823,9 @@ aws_managed_policies_data = """ "config:Put*", "config:Get*", "config:List*", - "config:Describe*" + "config:Describe*", + "config:BatchGet*", + "config:Select*" ], "Effect": "Allow", "Resource": "*" @@ -1420,16 +3836,153 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJUB3KIKTA4PU4OYAA", "PolicyName": "AWSConfigRulesExecutionRole", - "UpdateDate": "2016-03-25T17:59:36+00:00", - "VersionId": "v1" + "UpdateDate": "2019-05-13T21:33:30+00:00", + "VersionId": "v3" + }, + "AWSConfigServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSConfigServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-05-30T23:31:46+00:00", + "DefaultVersionId": "v11", + "Document": { + "Statement": [ + { + "Action": [ + "cloudtrail:DescribeTrails", + "cloudtrail:GetEventSelectors", + "ec2:Describe*", + "config:Put*", + "config:Get*", + "config:List*", + "config:Describe*", + "config:BatchGet*", + 
"config:Select*", + "cloudtrail:GetTrailStatus", + "cloudtrail:ListTags", + "iam:GenerateCredentialReport", + "iam:GetCredentialReport", + "iam:GetAccountAuthorizationDetails", + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary", + "iam:GetGroup", + "iam:GetGroupPolicy", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:GetUser", + "iam:GetUserPolicy", + "iam:ListAttachedGroupPolicies", + "iam:ListAttachedRolePolicies", + "iam:ListAttachedUserPolicies", + "iam:ListEntitiesForPolicy", + "iam:ListGroupPolicies", + "iam:ListGroupsForUser", + "iam:ListInstanceProfilesForRole", + "iam:ListPolicyVersions", + "iam:ListRolePolicies", + "iam:ListUserPolicies", + "iam:ListVirtualMFADevices", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTags", + "acm:DescribeCertificate", + "acm:ListCertificates", + "acm:ListTagsForCertificate", + "rds:DescribeDBInstances", + "rds:DescribeDBSecurityGroups", + "rds:DescribeDBSnapshotAttributes", + "rds:DescribeDBSnapshots", + "rds:DescribeDBSubnetGroups", + "rds:DescribeEventSubscriptions", + "rds:ListTagsForResource", + "rds:DescribeDBClusters", + "s3:GetAccelerateConfiguration", + "s3:GetBucketAcl", + "s3:GetBucketCORS", + "s3:GetBucketLocation", + "s3:GetBucketLogging", + "s3:GetBucketNotification", + "s3:GetBucketPolicy", + "s3:GetBucketRequestPayment", + "s3:GetBucketTagging", + "s3:GetBucketVersioning", + "s3:GetBucketWebsite", + "s3:GetLifecycleConfiguration", + "s3:GetReplicationConfiguration", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:GetEncryptionConfiguration", + "s3:GetBucketPublicAccessBlock", + "s3:GetAccountPublicAccessBlock", + "redshift:DescribeClusterParameterGroups", + "redshift:DescribeClusterParameters", + "redshift:DescribeClusterSecurityGroups", + "redshift:DescribeClusterSnapshots", + "redshift:DescribeClusterSubnetGroups", + "redshift:DescribeClusters", + "redshift:DescribeEventSubscriptions", + "redshift:DescribeLoggingStatus", + "dynamodb:DescribeLimits", + "dynamodb:DescribeTable", + "dynamodb:ListTables", + "dynamodb:ListTagsOfResource", + "cloudwatch:DescribeAlarms", + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:DescribeScalingPolicies", + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeLifecycleHooks", + "autoscaling:DescribePolicies", + "autoscaling:DescribeScheduledActions", + "autoscaling:DescribeTags", + "lambda:GetFunction", + "lambda:GetPolicy", + "lambda:ListFunctions", + "lambda:GetAlias", + "lambda:ListAliases", + "waf-regional:GetWebACLForResource", + "waf-regional:GetWebACL", + "cloudfront:ListTagsForResource", + "guardduty:ListDetectors", + "guardduty:GetMasterAccount", + "guardduty:GetDetector", + "codepipeline:ListPipelines", + "codepipeline:GetPipeline", + "codepipeline:GetPipelineState", + "kms:ListKeys", + "kms:GetKeyRotationStatus", + "kms:DescribeKey", + "ssm:DescribeDocument", + "ssm:GetDocument", + "ssm:DescribeAutomationExecutions", + "ssm:GetAutomationExecution", + "shield:DescribeProtection" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJUCWFHNZER665LLQQ", + "PolicyName": "AWSConfigServiceRolePolicy", + "UpdateDate": 
"2019-05-13T21:18:44+00:00", + "VersionId": "v11" }, "AWSConfigUserAccess": { "Arn": "arn:aws:iam::aws:policy/AWSConfigUserAccess", "AttachmentCount": 0, - "CreateDate": "2016-08-30T19:15:19+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2015-02-18T19:38:41+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -1438,6 +3991,7 @@ aws_managed_policies_data = """ "config:Describe*", "config:Deliver*", "config:List*", + "config:Select*", "tag:GetResources", "tag:GetTagKeys", "cloudtrail:DescribeTrails", @@ -1453,15 +4007,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIWTTSFJ7KKJE3MWGA", "PolicyName": "AWSConfigUserAccess", - "UpdateDate": "2016-08-30T19:15:19+00:00", - "VersionId": "v3" + "UpdateDate": "2019-03-18T20:27:47+00:00", + "VersionId": "v4" }, "AWSConnector": { "Arn": "arn:aws:iam::aws:policy/AWSConnector", "AttachmentCount": 0, - "CreateDate": "2015-09-28T19:50:38+00:00", + "CreateDate": "2015-02-11T17:14:31+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -1545,16 +4100,182 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ6YATONJHICG3DJ3U", "PolicyName": "AWSConnector", "UpdateDate": "2015-09-28T19:50:38+00:00", "VersionId": "v3" }, + "AWSControlTowerServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSControlTowerServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2019-05-03T18:19:11+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "cloudformation:CreateStack", + "cloudformation:CreateStackInstances", + "cloudformation:CreateStackSet", + "cloudformation:DeleteStack", + "cloudformation:DeleteStackInstances", + "cloudformation:DeleteStackSet", + "cloudformation:DescribeStackInstance", + "cloudformation:DescribeStacks", + "cloudformation:DescribeStackSet", + "cloudformation:DescribeStackSetOperation", + "cloudformation:GetTemplate", + "cloudformation:ListStackInstances", + "cloudformation:UpdateStack", + "cloudformation:UpdateStackInstances", + "cloudformation:UpdateStackSet" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/AWSControlTower*/*", + "arn:aws:cloudformation:*:*:stack/StackSet-AWSControlTower*/*", + "arn:aws:cloudformation:*:*:stackset/AWSControlTower*:*" + ] + }, + { + "Action": [ + "cloudtrail:CreateTrail", + "cloudtrail:DeleteTrail", + "cloudtrail:GetTrailStatus", + "cloudtrail:StartLogging", + "cloudtrail:StopLogging", + "cloudtrail:UpdateTrail", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:PutRetentionPolicy" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:aws-controltower/CloudTrailLogs:*", + "arn:aws:cloudtrail:*:*:trail/aws-controltower*" + ] + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-controltower*/*" + ] + }, + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/AWSControlTowerExecution" + ] + }, + { + "Action": [ + "cloudtrail:DescribeTrails", + "ec2:DescribeAvailabilityZones", + "iam:ListRoles", + "logs:CreateLogGroup", + "logs:DescribeLogGroups", + "organizations:CreateAccount", + "organizations:DescribeAccount", + "organizations:DescribeCreateAccountStatus", + "organizations:DescribeOrganization", + "organizations:DescribeOrganizationalUnit", + "organizations:DescribePolicy", + 
"organizations:ListAccounts", + "organizations:ListAccountsForParent", + "organizations:ListAWSServiceAccessForOrganization", + "organizations:ListChildren", + "organizations:ListOrganizationalUnitsForParent", + "organizations:ListParents", + "organizations:ListPoliciesForTarget", + "organizations:ListRoots", + "organizations:MoveAccount", + "servicecatalog:AssociatePrincipalWithPortfolio" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:GetRole", + "iam:GetUser", + "iam:ListAttachedRolePolicies", + "iam:GetRolePolicy" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/service-role/AWSControlTowerStackSetRole", + "arn:aws:iam::*:role/service-role/AWSControlTowerCloudTrailRole" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4MW35THVLF", + "PolicyName": "AWSControlTowerServiceRolePolicy", + "UpdateDate": "2019-05-23T19:14:24+00:00", + "VersionId": "v2" + }, + "AWSDataLifecycleManagerServiceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSDataLifecycleManagerServiceRole", + "AttachmentCount": 0, + "CreateDate": "2018-07-06T19:34:16+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:DeleteSnapshot", + "ec2:DescribeInstances", + "ec2:DescribeVolumes", + "ec2:DescribeSnapshots" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateTags" + ], + "Effect": "Allow", + "Resource": "arn:aws:ec2:*::snapshot/*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIZRLOKFUFE7YXQOJS", + "PolicyName": "AWSDataLifecycleManagerServiceRole", + "UpdateDate": "2019-05-29T16:44:12+00:00", + "VersionId": "v2" + }, "AWSDataPipelineRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSDataPipelineRole", "AttachmentCount": 0, - "CreateDate": "2016-02-22T17:17:38+00:00", - "DefaultVersionId": "v5", + "CreateDate": "2015-02-06T18:41:24+00:00", + "DefaultVersionId": "v6", "Document": { "Statement": [ { @@ -1622,6 +4343,19 @@ aws_managed_policies_data = """ "Resource": [ "*" ] + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": [ + "elasticmapreduce.amazonaws.com", + "spot.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -1629,15 +4363,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIKCP6XS3ESGF4GLO2", "PolicyName": "AWSDataPipelineRole", - "UpdateDate": "2016-02-22T17:17:38+00:00", - "VersionId": "v5" + "UpdateDate": "2017-12-22T23:43:28+00:00", + "VersionId": "v6" }, "AWSDataPipeline_FullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSDataPipeline_FullAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-17T18:48:39+00:00", + "CreateDate": "2017-01-19T23:14:54+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -1676,6 +4411,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIXOFIG7RSBMRPHXJ4", "PolicyName": "AWSDataPipeline_FullAccess", "UpdateDate": 
"2017-08-17T18:48:39+00:00", @@ -1684,7 +4420,7 @@ aws_managed_policies_data = """ "AWSDataPipeline_PowerUser": { "Arn": "arn:aws:iam::aws:policy/AWSDataPipeline_PowerUser", "AttachmentCount": 0, - "CreateDate": "2017-08-17T18:49:42+00:00", + "CreateDate": "2017-01-19T23:16:46+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -1722,11 +4458,826 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIMXGLVY6DVR24VTYS", "PolicyName": "AWSDataPipeline_PowerUser", "UpdateDate": "2017-08-17T18:49:42+00:00", "VersionId": "v2" }, + "AWSDataSyncFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSDataSyncFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-18T19:40:36+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "datasync:*", + "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:DeleteNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:ModifyNetworkInterfaceAttribute", + "elasticfilesystem:DescribeFileSystems", + "elasticfilesystem:DescribeMountTargets", + "iam:GetRole", + "iam:ListRoles", + "logs:CreateLogGroup", + "logs:DescribeLogGroups", + "s3:ListAllMyBuckets", + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": [ + "datasync.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJGOHCDUQULZJKDGT4", + "PolicyName": "AWSDataSyncFullAccess", + "UpdateDate": "2019-01-18T19:40:36+00:00", + "VersionId": "v1" + }, + "AWSDataSyncReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSDataSyncReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-18T19:18:44+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "datasync:Describe*", + "datasync:List*", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "elasticfilesystem:DescribeFileSystems", + "elasticfilesystem:DescribeMountTargets", + "iam:GetRole", + "iam:ListRoles", + "logs:DescribeLogGroups", + "s3:ListAllMyBuckets", + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJRYVEZEDR7ZEAGYLY", + "PolicyName": "AWSDataSyncReadOnlyAccess", + "UpdateDate": "2019-01-18T19:18:44+00:00", + "VersionId": "v1" + }, + "AWSDeepLensLambdaFunctionAccessPolicy": { + "Arn": "arn:aws:iam::aws:policy/AWSDeepLensLambdaFunctionAccessPolicy", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T15:47:18+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "s3:ListBucket", + "s3:GetObject", + "s3:ListObjects" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::deeplens*/*", + "arn:aws:s3:::deeplens*" + ], + "Sid": "DeepLensS3ObjectAccess" + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:PutLogEvents", + "logs:CreateLogGroup" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:log-group:/aws/greengrass/*", + "Sid": "DeepLensGreenGrassCloudWatchAccess" + }, + { + "Action": [ + "deeplens:*" + ], + "Effect": "Allow", + 
"Resource": [ + "*" + ], + "Sid": "DeepLensAccess" + }, + { + "Action": [ + "kinesisvideo:DescribeStream", + "kinesisvideo:CreateStream", + "kinesisvideo:GetDataEndpoint", + "kinesisvideo:PutMedia" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "DeepLensKinesisVideoAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIKIEE4PRM54V4G3ZG", + "PolicyName": "AWSDeepLensLambdaFunctionAccessPolicy", + "UpdateDate": "2018-05-29T22:08:02+00:00", + "VersionId": "v3" + }, + "AWSDeepLensServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSDeepLensServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T15:46:36+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "iot:CreateThing", + "iot:DeleteThing", + "iot:DeleteThingShadow", + "iot:DescribeThing", + "iot:GetThingShadow", + "iot:UpdateThing", + "iot:UpdateThingShadow" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iot:*:*:thing/deeplens*" + ], + "Sid": "DeepLensIoTThingAccess" + }, + { + "Action": [ + "iot:AttachThingPrincipal", + "iot:DetachThingPrincipal", + "iot:UpdateCertificate", + "iot:DeleteCertificate", + "iot:DetachPrincipalPolicy" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iot:*:*:thing/deeplens*", + "arn:aws:iot:*:*:cert/*" + ], + "Sid": "DeepLensIoTCertificateAccess" + }, + { + "Action": [ + "iot:CreateKeysAndCertificate", + "iot:CreatePolicy", + "iot:CreatePolicyVersion" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "DeepLensIoTCreateCertificateAndPolicyAccess" + }, + { + "Action": [ + "iot:AttachPrincipalPolicy" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iot:*:*:policy/deeplens*", + "arn:aws:iot:*:*:cert/*" + ], + "Sid": "DeepLensIoTAttachCertificatePolicyAccess" + }, + { + "Action": [ + "iot:GetThingShadow", + "iot:UpdateThingShadow" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iot:*:*:thing/deeplens*" + ], + "Sid": "DeepLensIoTDataAccess" + }, + { + "Action": [ + "iot:DescribeEndpoint" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "DeepLensIoTEndpointAccess" + }, + { + "Action": [ + "deeplens:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "DeepLensAccess" + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::deeplens*" + ], + "Sid": "DeepLensS3ObjectAccess" + }, + { + "Action": [ + "s3:DeleteBucket", + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::deeplens*" + ], + "Sid": "DeepLensS3Buckets" + }, + { + "Action": [ + "s3:CreateBucket" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "DeepLensCreateS3Buckets" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": [ + "greengrass.amazonaws.com", + "sagemaker.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "DeepLensIAMPassRoleAccess" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEqualsIfExists": { + "iam:PassedToService": "lambda.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/AWSDeepLens*", + "arn:aws:iam::*:role/service-role/AWSDeepLens*" + ], + "Sid": "DeepLensIAMLambdaPassRoleAccess" + }, + { + "Action": [ + "greengrass:AssociateRoleToGroup", + "greengrass:AssociateServiceRoleToAccount", + "greengrass:CreateResourceDefinition", + 
"greengrass:CreateResourceDefinitionVersion", + "greengrass:CreateCoreDefinition", + "greengrass:CreateCoreDefinitionVersion", + "greengrass:CreateDeployment", + "greengrass:CreateFunctionDefinition", + "greengrass:CreateFunctionDefinitionVersion", + "greengrass:CreateGroup", + "greengrass:CreateGroupCertificateAuthority", + "greengrass:CreateGroupVersion", + "greengrass:CreateLoggerDefinition", + "greengrass:CreateLoggerDefinitionVersion", + "greengrass:CreateSubscriptionDefinition", + "greengrass:CreateSubscriptionDefinitionVersion", + "greengrass:DeleteCoreDefinition", + "greengrass:DeleteFunctionDefinition", + "greengrass:DeleteGroup", + "greengrass:DeleteLoggerDefinition", + "greengrass:DeleteSubscriptionDefinition", + "greengrass:DisassociateRoleFromGroup", + "greengrass:DisassociateServiceRoleFromAccount", + "greengrass:GetAssociatedRole", + "greengrass:GetConnectivityInfo", + "greengrass:GetCoreDefinition", + "greengrass:GetCoreDefinitionVersion", + "greengrass:GetDeploymentStatus", + "greengrass:GetDeviceDefinition", + "greengrass:GetDeviceDefinitionVersion", + "greengrass:GetFunctionDefinition", + "greengrass:GetFunctionDefinitionVersion", + "greengrass:GetGroup", + "greengrass:GetGroupCertificateAuthority", + "greengrass:GetGroupCertificateConfiguration", + "greengrass:GetGroupVersion", + "greengrass:GetLoggerDefinition", + "greengrass:GetLoggerDefinitionVersion", + "greengrass:GetResourceDefinition", + "greengrass:GetServiceRoleForAccount", + "greengrass:GetSubscriptionDefinition", + "greengrass:GetSubscriptionDefinitionVersion", + "greengrass:ListCoreDefinitionVersions", + "greengrass:ListCoreDefinitions", + "greengrass:ListDeployments", + "greengrass:ListDeviceDefinitionVersions", + "greengrass:ListDeviceDefinitions", + "greengrass:ListFunctionDefinitionVersions", + "greengrass:ListFunctionDefinitions", + "greengrass:ListGroupCertificateAuthorities", + "greengrass:ListGroupVersions", + "greengrass:ListGroups", + "greengrass:ListLoggerDefinitionVersions", + "greengrass:ListLoggerDefinitions", + "greengrass:ListSubscriptionDefinitionVersions", + "greengrass:ListSubscriptionDefinitions", + "greengrass:ResetDeployments", + "greengrass:UpdateConnectivityInfo", + "greengrass:UpdateCoreDefinition", + "greengrass:UpdateDeviceDefinition", + "greengrass:UpdateFunctionDefinition", + "greengrass:UpdateGroup", + "greengrass:UpdateGroupCertificateConfiguration", + "greengrass:UpdateLoggerDefinition", + "greengrass:UpdateSubscriptionDefinition", + "greengrass:UpdateResourceDefinition" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "DeepLensGreenGrassAccess" + }, + { + "Action": [ + "lambda:CreateFunction", + "lambda:DeleteFunction", + "lambda:GetFunction", + "lambda:GetFunctionConfiguration", + "lambda:ListFunctions", + "lambda:ListVersionsByFunction", + "lambda:PublishVersion", + "lambda:UpdateFunctionCode", + "lambda:UpdateFunctionConfiguration" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:lambda:*:*:function:deeplens*" + ], + "Sid": "DeepLensLambdaAdminFunctionAccess" + }, + { + "Action": [ + "lambda:GetFunction", + "lambda:GetFunctionConfiguration", + "lambda:ListFunctions", + "lambda:ListVersionsByFunction" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:lambda:*:*:function:*" + ], + "Sid": "DeepLensLambdaUsersFunctionAccess" + }, + { + "Action": [ + "sagemaker:CreateTrainingJob", + "sagemaker:DescribeTrainingJob", + "sagemaker:StopTrainingJob" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:sagemaker:*:*:training-job/deeplens*" + ], + "Sid": 
"DeepLensSageMakerWriteAccess" + }, + { + "Action": [ + "sagemaker:DescribeTrainingJob" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:sagemaker:*:*:training-job/*" + ], + "Sid": "DeepLensSageMakerReadAccess" + }, + { + "Action": [ + "acuity:CreateStream", + "acuity:DescribeStream", + "acuity:DeleteStream" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:acuity:*:*:stream/deeplens*/*" + ], + "Sid": "DeepLensAcuityStreamAccess" + }, + { + "Action": [ + "acuity:GetDataEndpoint" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "DeepLensAcuityEndpointAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJK2Z2S7FPJFCYGR72", + "PolicyName": "AWSDeepLensServiceRolePolicy", + "UpdateDate": "2018-06-07T21:25:01+00:00", + "VersionId": "v5" + }, + "AWSDeepRacerCloudFormationAccessPolicy": { + "Arn": "arn:aws:iam::aws:policy/AWSDeepRacerCloudFormationAccessPolicy", + "AttachmentCount": 0, + "CreateDate": "2019-02-28T21:59:49+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudformation:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:AllocateAddress", + "ec2:AttachInternetGateway", + "ec2:AssociateRouteTable", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateInternetGateway", + "ec2:CreateNatGateway", + "ec2:CreateNetworkAcl", + "ec2:CreateNetworkAclEntry", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateTags", + "ec2:CreateVpc", + "ec2:DeleteInternetGateway", + "ec2:DeleteNatGateway", + "ec2:DeleteNetworkAcl", + "ec2:DeleteNetworkAclEntry", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSubnet", + "ec2:DeleteTags", + "ec2:DeleteVpc", + "ec2:DescribeAddresses", + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVpcs", + "ec2:DetachInternetGateway", + "ec2:DisassociateRouteTable", + "ec2:ModifyVpcAttribute", + "ec2:ReleaseAddress", + "ec2:ReplaceNetworkAclAssociation", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJYG7FM75UF5CW5ICS", + "PolicyName": "AWSDeepRacerCloudFormationAccessPolicy", + "UpdateDate": "2019-02-28T21:59:49+00:00", + "VersionId": "v1" + }, + "AWSDeepRacerRoboMakerAccessPolicy": { + "Arn": "arn:aws:iam::aws:policy/AWSDeepRacerRoboMakerAccessPolicy", + "AttachmentCount": 0, + "CreateDate": "2019-02-28T21:59:58+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "robomaker:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:PutMetricData", + "ec2:CreateNetworkInterfacePermission", + "ec2:DeleteNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeVpcs" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": [ + 
"arn:aws:logs:*:*:log-group:/aws/robomaker/SimulationJobs", + "arn:aws:logs:*:*:log-group:/aws/robomaker/SimulationJobs:log-stream:*" + ] + }, + { + "Action": [ + "s3:GetObject", + "s3:GetBucketLocation", + "s3:ListBucket", + "s3:ListAllMyBuckets", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*DeepRacer*", + "arn:aws:s3:::*Deepracer*", + "arn:aws:s3:::*deepracer*", + "arn:aws:s3:::dr-*" + ] + }, + { + "Action": [ + "s3:GetObject" + ], + "Condition": { + "StringEqualsIgnoreCase": { + "s3:ExistingObjectTag/DeepRacer": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "kinesisvideo:CreateStream", + "kinesisvideo:DescribeStream", + "kinesisvideo:GetDataEndpoint", + "kinesisvideo:PutMedia", + "kinesisvideo:TagStream" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesisvideo:*:*:stream/dr-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIUKGYRTDCUFOMRGAM", + "PolicyName": "AWSDeepRacerRoboMakerAccessPolicy", + "UpdateDate": "2019-02-28T21:59:58+00:00", + "VersionId": "v1" + }, + "AWSDeepRacerServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSDeepRacerServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2019-02-28T21:58:09+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "deepracer:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "robomaker:*", + "sagemaker:*", + "sts:*", + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudformation:ListStackResources", + "cloudformation:DescribeStacks", + "cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:DescribeStackResource", + "cloudformation:DescribeStackResources", + "cloudformation:DescribeStackEvents", + "cloudformation:DetectStackDrift", + "cloudformation:DescribeStackDriftDetectionStatus", + "cloudformation:DescribeStackResourceDrifts" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "robomaker.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/AWSDeepRacer*", + "arn:aws:iam::*:role/service-role/AWSDeepRacer*" + ] + }, + { + "Action": [ + "cloudwatch:GetMetricData", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:GetLogEvents", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "lambda:CreateFunction", + "lambda:DeleteFunction", + "lambda:GetFunction", + "lambda:InvokeFunction", + "lambda:UpdateFunctionCode" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:lambda:*:*:function:*DeepRacer*", + "arn:aws:lambda:*:*:function:*Deepracer*", + "arn:aws:lambda:*:*:function:*deepracer*", + "arn:aws:lambda:*:*:function:*dr-*" + ] + }, + { + "Action": [ + "s3:GetObject", + "s3:GetBucketLocation", + "s3:DeleteObject", + "s3:ListBucket", + "s3:PutObject", + "s3:PutBucketPolicy", + "s3:GetBucketAcl" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*DeepRacer*", + "arn:aws:s3:::*Deepracer*", + "arn:aws:s3:::*deepracer*", + "arn:aws:s3:::dr-*" + ] + }, + { + "Action": [ + "s3:GetObject" + ], + "Condition": { + "StringEqualsIgnoreCase": { + "s3:ExistingObjectTag/DeepRacer": "true" + } + }, + "Effect": 
"Allow", + "Resource": "*" + }, + { + "Action": [ + "kinesisvideo:CreateStream", + "kinesisvideo:DeleteStream", + "kinesisvideo:DescribeStream", + "kinesisvideo:GetDataEndpoint", + "kinesisvideo:GetHLSStreamingSessionURL", + "kinesisvideo:GetMedia", + "kinesisvideo:PutMedia", + "kinesisvideo:TagStream" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesisvideo:*:*:stream/dr-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJTUAQLIAVBJ7LZ32S", + "PolicyName": "AWSDeepRacerServiceRolePolicy", + "UpdateDate": "2019-04-06T04:08:05+00:00", + "VersionId": "v2" + }, + "AWSDenyAll": { + "Arn": "arn:aws:iam::aws:policy/AWSDenyAll", + "AttachmentCount": 0, + "CreateDate": "2019-05-01T22:36:14+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "*" + ], + "Effect": "Deny", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4P43IUQ5E5", + "PolicyName": "AWSDenyAll", + "UpdateDate": "2019-05-01T22:36:14+00:00", + "VersionId": "v1" + }, "AWSDeviceFarmFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSDeviceFarmFullAccess", "AttachmentCount": 0, @@ -1747,6 +5298,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJO7KEDP4VYJPNT5UW", "PolicyName": "AWSDeviceFarmFullAccess", "UpdateDate": "2015-07-13T16:37:38+00:00", @@ -1756,12 +5308,14 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AWSDirectConnectFullAccess", "AttachmentCount": 0, "CreateDate": "2015-02-06T18:40:07+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { "Action": [ - "directconnect:*" + "directconnect:*", + "ec2:DescribeVpnGateways", + "ec2:DescribeTransitGateways" ], "Effect": "Allow", "Resource": "*" @@ -1772,21 +5326,24 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJQF2QKZSK74KTIHOW", "PolicyName": "AWSDirectConnectFullAccess", - "UpdateDate": "2015-02-06T18:40:07+00:00", - "VersionId": "v1" + "UpdateDate": "2019-04-30T15:29:29+00:00", + "VersionId": "v3" }, "AWSDirectConnectReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AWSDirectConnectReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2015-02-06T18:40:08+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { "Action": [ - "directconnect:Describe*" + "directconnect:Describe*", + "ec2:DescribeVpnGateways", + "ec2:DescribeTransitGateways" ], "Effect": "Allow", "Resource": "*" @@ -1797,16 +5354,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI23HZ27SI6FQMGNQ2", "PolicyName": "AWSDirectConnectReadOnlyAccess", - "UpdateDate": "2015-02-06T18:40:08+00:00", - "VersionId": "v1" + "UpdateDate": "2019-04-30T15:23:18+00:00", + "VersionId": "v3" }, "AWSDirectoryServiceFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSDirectoryServiceFullAccess", "AttachmentCount": 0, - "CreateDate": "2016-02-24T23:10:36+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2015-02-06T18:41:11+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -1823,10 +5381,19 @@ 
aws_managed_policies_data = """ "ec2:DescribeVpcs", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeSecurityGroups", "sns:GetTopicAttributes", "sns:ListSubscriptions", "sns:ListSubscriptionsByTopic", - "sns:ListTopics" + "sns:ListTopics", + "iam:ListRoles", + "organizations:ListAccountsForParent", + "organizations:ListRoots", + "organizations:ListAccounts", + "organizations:DescribeOrganization", + "organizations:DescribeAccount", + "organizations:ListOrganizationalUnitsForParent", + "organizations:ListAWSServiceAccessForOrganization" ], "Effect": "Allow", "Resource": "*" @@ -1841,6 +5408,32 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "arn:aws:sns:*:*:DirectoryMonitoring*" + }, + { + "Action": [ + "organizations:EnableAWSServiceAccess", + "organizations:DisableAWSServiceAccess" + ], + "Condition": { + "ForAllValues:StringLike": { + "organizations:ServicePrincipal": [ + "ds.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:network-interface/*", + "arn:aws:ec2:*:*:security-group/*" + ] } ], "Version": "2012-10-17" @@ -1848,16 +5441,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAINAW5ANUWTH3R4ANI", "PolicyName": "AWSDirectoryServiceFullAccess", - "UpdateDate": "2016-02-24T23:10:36+00:00", - "VersionId": "v2" + "UpdateDate": "2019-02-05T20:29:43+00:00", + "VersionId": "v4" }, "AWSDirectoryServiceReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AWSDirectoryServiceReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2016-02-24T23:11:18+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2015-02-06T18:41:12+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -1873,7 +5467,10 @@ aws_managed_policies_data = """ "sns:ListTopics", "sns:GetTopicAttributes", "sns:ListSubscriptions", - "sns:ListSubscriptionsByTopic" + "sns:ListSubscriptionsByTopic", + "organizations:DescribeAccount", + "organizations:DescribeOrganization", + "organizations:ListAWSServiceAccessForOrganization" ], "Effect": "Allow", "Resource": "*" @@ -1884,23 +5481,160 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIHWYO6WSDNCG64M2W", "PolicyName": "AWSDirectoryServiceReadOnlyAccess", - "UpdateDate": "2016-02-24T23:11:18+00:00", - "VersionId": "v3" + "UpdateDate": "2018-09-25T21:54:01+00:00", + "VersionId": "v4" }, - "AWSEC2SpotServiceRolePolicy": { - "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSEC2SpotServiceRolePolicy", + "AWSDiscoveryContinuousExportFirehosePolicy": { + "Arn": "arn:aws:iam::aws:policy/AWSDiscoveryContinuousExportFirehosePolicy", "AttachmentCount": 0, - "CreateDate": "2017-09-18T18:51:54+00:00", + "CreateDate": "2018-08-09T18:29:39+00:00", "DefaultVersionId": "v1", "Document": { "Statement": [ { "Action": [ - "ec2:DescribeInstances", - "ec2:StartInstances", - "ec2:StopInstances" + "glue:GetTableVersions" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:AbortMultipartUpload", + "s3:GetBucketLocation", + "s3:GetObject", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-application-discovery-service-*", + "arn:aws:s3:::aws-application-discovery-service-*/*" + ] + }, + { + "Action": [ 
+ "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/application-discovery-service/firehose:log-stream:*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIX6FHUTEUNXYDFZ7C", + "PolicyName": "AWSDiscoveryContinuousExportFirehosePolicy", + "UpdateDate": "2018-08-09T18:29:39+00:00", + "VersionId": "v1" + }, + "AWSEC2FleetServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSEC2FleetServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-03-21T00:08:55+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeImages", + "ec2:DescribeSubnets", + "ec2:RequestSpotInstances", + "ec2:DescribeInstanceStatus", + "ec2:RunInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "EC2SpotManagement" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": [ + "ec2.amazonaws.com", + "ec2.amazonaws.com.cn" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:CreateTags" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:instance/*", + "arn:aws:ec2:*:*:spot-instances-request/*" + ] + }, + { + "Action": [ + "ec2:TerminateInstances" + ], + "Condition": { + "StringLike": { + "ec2:ResourceTag/aws:ec2:fleet-id": "*" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJCL355O4TC27CPKVC", + "PolicyName": "AWSEC2FleetServiceRolePolicy", + "UpdateDate": "2018-04-19T21:37:07+00:00", + "VersionId": "v2" + }, + "AWSEC2SpotFleetServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSEC2SpotFleetServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-10-23T19:13:06+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeImages", + "ec2:DescribeSubnets", + "ec2:RequestSpotInstances", + "ec2:DescribeInstanceStatus", + "ec2:RunInstances" ], "Effect": "Allow", "Resource": [ @@ -1912,14 +5646,39 @@ aws_managed_policies_data = """ "iam:PassRole" ], "Condition": { - "StringLike": { - "iam:PassedToService": "ec2.amazonaws.com" + "StringEquals": { + "iam:PassedToService": [ + "ec2.amazonaws.com", + "ec2.amazonaws.com.cn" + ] } }, "Effect": "Allow", "Resource": [ "*" ] + }, + { + "Action": [ + "ec2:CreateTags" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:instance/*", + "arn:aws:ec2:*:*:spot-instances-request/*" + ] + }, + { + "Action": [ + "ec2:TerminateInstances" + ], + "Condition": { + "StringLike": { + "ec2:ResourceTag/aws:ec2spot:fleet-request-id": "*" + } + }, + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -1927,10 +5686,85 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAILWCVTZD57EMYWMBO", + "PolicyName": "AWSEC2SpotFleetServiceRolePolicy", + "UpdateDate": "2018-03-28T19:04:33+00:00", + "VersionId": "v3" + }, + "AWSEC2SpotServiceRolePolicy": { + "Arn": 
"arn:aws:iam::aws:policy/aws-service-role/AWSEC2SpotServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-09-18T18:51:54+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeInstances", + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:RunInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:RunInstances" + ], + "Condition": { + "StringNotEquals": { + "ec2:InstanceMarketType": "spot" + } + }, + "Effect": "Deny", + "Resource": [ + "arn:aws:ec2:*:*:instance/*" + ] + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": [ + "ec2.amazonaws.com", + "ec2.amazonaws.com.cn" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:CreateTags" + ], + "Condition": { + "StringEquals": { + "ec2:CreateAction": "RunInstances" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIZJJBQNXQYVKTEXGM", "PolicyName": "AWSEC2SpotServiceRolePolicy", - "UpdateDate": "2017-09-18T18:51:54+00:00", - "VersionId": "v1" + "UpdateDate": "2018-12-12T00:13:51+00:00", + "VersionId": "v4" }, "AWSElasticBeanstalkCustomPlatformforEC2Role": { "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkCustomPlatformforEC2Role", @@ -2008,6 +5842,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJRVFXSS6LEIQGBKDY", "PolicyName": "AWSElasticBeanstalkCustomPlatformforEC2Role", "UpdateDate": "2017-02-21T22:50:30+00:00", @@ -2016,8 +5851,8 @@ aws_managed_policies_data = """ "AWSElasticBeanstalkEnhancedHealth": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkEnhancedHealth", "AttachmentCount": 0, - "CreateDate": "2016-08-22T20:28:36+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2016-02-08T23:17:27+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -2036,12 +5871,22 @@ aws_managed_policies_data = """ "autoscaling:DescribeAutoScalingGroups", "autoscaling:DescribeAutoScalingInstances", "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeNotificationConfigurations" + "autoscaling:DescribeNotificationConfigurations", + "sns:Publish" ], "Effect": "Allow", "Resource": [ "*" ] + }, + { + "Action": [ + "logs:DescribeLogStreams", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk/*:log-stream:*" } ], "Version": "2012-10-17" @@ -2049,16 +5894,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIH5EFJNMOGUUTKLFE", "PolicyName": "AWSElasticBeanstalkEnhancedHealth", - "UpdateDate": "2016-08-22T20:28:36+00:00", - "VersionId": "v2" + "UpdateDate": "2018-04-09T22:12:53+00:00", + "VersionId": "v4" }, "AWSElasticBeanstalkFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkFullAccess", "AttachmentCount": 0, - "CreateDate": "2016-12-21T01:00:13+00:00", - "DefaultVersionId": "v5", + "CreateDate": "2015-02-06T18:40:18+00:00", + "DefaultVersionId": "v7", "Document": { "Statement": [ { @@ -2107,6 +5953,34 @@ aws_managed_policies_data = """ "arn:aws:iam::*:instance-profile/aws-elasticbeanstalk*" ] }, + { + "Action": [ + 
"iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringLike": { + "iam:AWSServiceName": "autoscaling.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling*" + ] + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringLike": { + "iam:AWSServiceName": "elasticbeanstalk.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/elasticbeanstalk.amazonaws.com/AWSServiceRoleForElasticBeanstalk*" + ] + }, { "Action": [ "iam:AttachRolePolicy" @@ -2128,15 +6002,49 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIZYX2YLLBW2LJVUFW", "PolicyName": "AWSElasticBeanstalkFullAccess", - "UpdateDate": "2016-12-21T01:00:13+00:00", - "VersionId": "v5" + "UpdateDate": "2018-02-23T19:36:01+00:00", + "VersionId": "v7" + }, + "AWSElasticBeanstalkMaintenance": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSElasticBeanstalkMaintenance", + "AttachmentCount": 0, + "CreateDate": "2019-01-11T23:22:52+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": { + "Action": [ + "cloudformation:CreateChangeSet", + "cloudformation:DescribeChangeSet", + "cloudformation:ExecuteChangeSet", + "cloudformation:DeleteChangeSet", + "cloudformation:ListChangeSets", + "cloudformation:DescribeStacks" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/awseb-*", + "arn:aws:cloudformation:*:*:stack/eb-*" + ], + "Sid": "AllowCloudformationChangeSetOperationsOnElasticBeanstalkStacks" + }, + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJQPH22XGBH2VV2LSW", + "PolicyName": "AWSElasticBeanstalkMaintenance", + "UpdateDate": "2019-01-11T23:22:52+00:00", + "VersionId": "v1" }, "AWSElasticBeanstalkMulticontainerDocker": { "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkMulticontainerDocker", "AttachmentCount": 0, - "CreateDate": "2016-06-06T23:45:37+00:00", + "CreateDate": "2016-02-08T23:15:29+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -2163,6 +6071,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ45SBYG72SD6SHJEY", "PolicyName": "AWSElasticBeanstalkMulticontainerDocker", "UpdateDate": "2016-06-06T23:45:37+00:00", @@ -2210,6 +6119,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI47KNGXDAXFD4SDHG", "PolicyName": "AWSElasticBeanstalkReadOnlyAccess", "UpdateDate": "2015-02-06T18:40:19+00:00", @@ -2218,8 +6128,8 @@ aws_managed_policies_data = """ "AWSElasticBeanstalkService": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSElasticBeanstalkService", "AttachmentCount": 0, - "CreateDate": "2017-06-21T16:49:23+00:00", - "DefaultVersionId": "v11", + "CreateDate": "2016-04-11T20:27:23+00:00", + "DefaultVersionId": "v15", "Document": { "Statement": [ { @@ -2254,6 +6164,17 @@ aws_managed_policies_data = """ ], "Sid": "AllowS3OperationsOnElasticBeanstalkBuckets" }, + { + "Action": "ec2:RunInstances", + "Condition": { + "ArnLike": { + "ec2:LaunchTemplate": "arn:aws:ec2:*:*:launch-template/*" + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "AllowLaunchTemplateRunInstances" 
+ }, { "Action": [ "autoscaling:AttachInstances", @@ -2271,7 +6192,10 @@ aws_managed_policies_data = """ "autoscaling:DescribeScalingActivities", "autoscaling:DescribeScheduledActions", "autoscaling:DetachInstances", + "autoscaling:DeletePolicy", + "autoscaling:PutScalingPolicy", "autoscaling:PutScheduledUpdateGroupAction", + "autoscaling:PutNotificationConfiguration", "autoscaling:ResumeProcesses", "autoscaling:SetDesiredCapacity", "autoscaling:SuspendProcesses", @@ -2282,6 +6206,12 @@ aws_managed_policies_data = """ "ec2:AllocateAddress", "ec2:AuthorizeSecurityGroupEgress", "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateLaunchTemplate", + "ec2:CreateLaunchTemplateVersion", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DeleteLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", "ec2:CreateSecurityGroup", "ec2:DeleteSecurityGroup", "ec2:DescribeAccountAttributes", @@ -2293,6 +6223,9 @@ aws_managed_policies_data = """ "ec2:DescribeSnapshots", "ec2:DescribeSubnets", "ec2:DescribeVpcs", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeVpcClassicLink", "ec2:DisassociateAddress", "ec2:ReleaseAddress", "ec2:RevokeSecurityGroupEgress", @@ -2319,6 +6252,7 @@ aws_managed_policies_data = """ "iam:PassRole", "logs:CreateLogGroup", "logs:PutRetentionPolicy", + "logs:DescribeLogGroups", "rds:DescribeDBEngineVersions", "rds:DescribeDBInstances", "rds:DescribeOrderableDBInstanceOptions", @@ -2353,144 +6287,66 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJKQ5SN74ZQ4WASXBM", "PolicyName": "AWSElasticBeanstalkService", - "UpdateDate": "2017-06-21T16:49:23+00:00", - "VersionId": "v11" + "UpdateDate": "2019-02-05T17:46:21+00:00", + "VersionId": "v15" }, "AWSElasticBeanstalkServiceRolePolicy": { "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSElasticBeanstalkServiceRolePolicy", "AttachmentCount": 0, "CreateDate": "2017-09-13T23:46:37+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v5", "Document": { "Statement": [ { "Action": [ - "iam:PassRole" - ], - "Condition": { - "StringLikeIfExists": { - "iam:PassedToService": "elasticbeanstalk.amazonaws.com" - } - }, - "Effect": "Allow", - "Resource": "*", - "Sid": "AllowPassRoleToElasticBeanstalk" - }, - { - "Action": [ - "cloudformation:*" + "cloudformation:DescribeStackResource", + "cloudformation:DescribeStackResources", + "cloudformation:DescribeStacks" ], "Effect": "Allow", "Resource": [ "arn:aws:cloudformation:*:*:stack/awseb-*", "arn:aws:cloudformation:*:*:stack/eb-*" ], - "Sid": "AllowCloudformationOperationsOnElasticBeanstalkStacks" + "Sid": "AllowCloudformationReadOperationsOnElasticBeanstalkStacks" }, { "Action": [ - "logs:DeleteLogGroup" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*" - ], - "Sid": "AllowDeleteCloudwatchLogGroups" - }, - { - "Action": [ - "s3:*" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:s3:::elasticbeanstalk-*", - "arn:aws:s3:::elasticbeanstalk-*/*" - ], - "Sid": "AllowS3OperationsOnElasticBeanstalkBuckets" - }, - { - "Action": [ - "autoscaling:AttachInstances", - "autoscaling:CreateAutoScalingGroup", - "autoscaling:CreateLaunchConfiguration", - "autoscaling:DeleteLaunchConfiguration", - "autoscaling:DeleteAutoScalingGroup", - "autoscaling:DeleteScheduledAction", - "autoscaling:DescribeAccountLimits", "autoscaling:DescribeAutoScalingGroups", 
"autoscaling:DescribeAutoScalingInstances", - "autoscaling:DescribeLaunchConfigurations", - "autoscaling:DescribeLoadBalancers", "autoscaling:DescribeNotificationConfigurations", "autoscaling:DescribeScalingActivities", - "autoscaling:DescribeScheduledActions", - "autoscaling:DetachInstances", - "autoscaling:PutScheduledUpdateGroupAction", - "autoscaling:ResumeProcesses", - "autoscaling:SetDesiredCapacity", - "autoscaling:SuspendProcesses", - "autoscaling:TerminateInstanceInAutoScalingGroup", - "autoscaling:UpdateAutoScalingGroup", - "cloudwatch:PutMetricAlarm", + "autoscaling:PutNotificationConfiguration", + "ec2:DescribeInstanceStatus", "ec2:AssociateAddress", - "ec2:AllocateAddress", - "ec2:AuthorizeSecurityGroupEgress", - "ec2:AuthorizeSecurityGroupIngress", - "ec2:CreateSecurityGroup", - "ec2:DeleteSecurityGroup", - "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", - "ec2:DescribeImages", "ec2:DescribeInstances", - "ec2:DescribeKeyPairs", "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVpcs", - "ec2:DisassociateAddress", - "ec2:ReleaseAddress", - "ec2:RevokeSecurityGroupEgress", - "ec2:RevokeSecurityGroupIngress", - "ec2:TerminateInstances", - "ecs:CreateCluster", - "ecs:DeleteCluster", - "ecs:DescribeClusters", - "ecs:RegisterTaskDefinition", - "elasticbeanstalk:*", - "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", - "elasticloadbalancing:ConfigureHealthCheck", - "elasticloadbalancing:CreateLoadBalancer", - "elasticloadbalancing:DeleteLoadBalancer", - "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", "elasticloadbalancing:DescribeInstanceHealth", "elasticloadbalancing:DescribeLoadBalancers", "elasticloadbalancing:DescribeTargetHealth", - "elasticloadbalancing:RegisterInstancesWithLoadBalancer", "elasticloadbalancing:DescribeTargetGroups", - "elasticloadbalancing:RegisterTargets", - "elasticloadbalancing:DeregisterTargets", - "iam:ListRoles", - "logs:CreateLogGroup", - "logs:PutRetentionPolicy", - "rds:DescribeDBInstances", - "rds:DescribeOrderableDBInstanceOptions", - "rds:DescribeDBEngineVersions", - "sns:ListTopics", - "sns:GetTopicAttributes", - "sns:ListSubscriptionsByTopic", "sqs:GetQueueAttributes", "sqs:GetQueueUrl", - "codebuild:CreateProject", - "codebuild:DeleteProject", - "codebuild:BatchGetBuilds", - "codebuild:StartBuild" + "sns:Publish" ], "Effect": "Allow", "Resource": [ "*" ], "Sid": "AllowOperations" + }, + { + "Action": [ + "logs:DescribeLogStreams", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk/*:log-stream:*" } ], "Version": "2012-10-17" @@ -2498,16 +6354,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIID62QSI3OSIPQXTM", "PolicyName": "AWSElasticBeanstalkServiceRolePolicy", - "UpdateDate": "2017-09-13T23:46:37+00:00", - "VersionId": "v1" + "UpdateDate": "2018-04-09T22:06:23+00:00", + "VersionId": "v5" }, "AWSElasticBeanstalkWebTier": { "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkWebTier", "AttachmentCount": 0, - "CreateDate": "2016-12-21T02:06:25+00:00", - "DefaultVersionId": "v4", + "CreateDate": "2016-02-08T23:08:54+00:00", + "DefaultVersionId": "v6", "Document": { "Statement": [ { @@ -2526,7 +6383,10 @@ aws_managed_policies_data = """ { "Action": [ "xray:PutTraceSegments", - "xray:PutTelemetryRecords" + "xray:PutTelemetryRecords", + "xray:GetSamplingRules", + "xray:GetSamplingTargets", + 
"xray:GetSamplingStatisticSummaries" ], "Effect": "Allow", "Resource": "*", @@ -2535,7 +6395,9 @@ aws_managed_policies_data = """ { "Action": [ "logs:PutLogEvents", - "logs:CreateLogStream" + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:DescribeLogGroups" ], "Effect": "Allow", "Resource": [ @@ -2549,16 +6411,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIUF4325SJYOREKW3A", "PolicyName": "AWSElasticBeanstalkWebTier", - "UpdateDate": "2016-12-21T02:06:25+00:00", - "VersionId": "v4" + "UpdateDate": "2019-03-01T00:04:49+00:00", + "VersionId": "v6" }, "AWSElasticBeanstalkWorkerTier": { "Arn": "arn:aws:iam::aws:policy/AWSElasticBeanstalkWorkerTier", "AttachmentCount": 0, - "CreateDate": "2016-12-21T02:01:55+00:00", - "DefaultVersionId": "v4", + "CreateDate": "2016-02-08T23:12:02+00:00", + "DefaultVersionId": "v5", "Document": { "Statement": [ { @@ -2572,7 +6435,10 @@ aws_managed_policies_data = """ { "Action": [ "xray:PutTraceSegments", - "xray:PutTelemetryRecords" + "xray:PutTelemetryRecords", + "xray:GetSamplingRules", + "xray:GetSamplingTargets", + "xray:GetSamplingStatisticSummaries" ], "Effect": "Allow", "Resource": "*", @@ -2636,10 +6502,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJQDLBRSJVKVF4JMSK", "PolicyName": "AWSElasticBeanstalkWorkerTier", - "UpdateDate": "2016-12-21T02:01:55+00:00", - "VersionId": "v4" + "UpdateDate": "2019-03-01T00:07:00+00:00", + "VersionId": "v5" }, "AWSElasticLoadBalancingClassicServiceRolePolicy": { "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSElasticLoadBalancingClassicServiceRolePolicy", @@ -2682,6 +6549,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIUMWW3QP7DPZPNVU4", "PolicyName": "AWSElasticLoadBalancingClassicServiceRolePolicy", "UpdateDate": "2017-09-19T22:36:18+00:00", @@ -2691,13 +6559,14 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSElasticLoadBalancingServiceRolePolicy", "AttachmentCount": 0, "CreateDate": "2017-09-19T22:19:04+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { "Action": [ "ec2:DescribeAddresses", "ec2:DescribeInstances", + "ec2:DescribeNetworkInterfaces", "ec2:DescribeSubnets", "ec2:DescribeSecurityGroups", "ec2:DescribeVpcs", @@ -2717,7 +6586,12 @@ aws_managed_policies_data = """ "ec2:DetachNetworkInterface", "ec2:AssignPrivateIpAddresses", "ec2:AssignIpv6Addresses", - "ec2:UnassignIpv6Addresses" + "ec2:UnassignIpv6Addresses", + "logs:CreateLogDelivery", + "logs:GetLogDelivery", + "logs:UpdateLogDelivery", + "logs:DeleteLogDelivery", + "logs:ListLogDeliveries" ], "Effect": "Allow", "Resource": "*" @@ -2728,9 +6602,195 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIMHWGGSRHLOQUICJQ", "PolicyName": "AWSElasticLoadBalancingServiceRolePolicy", - "UpdateDate": "2017-09-19T22:19:04+00:00", + "UpdateDate": "2019-03-18T21:51:14+00:00", + "VersionId": "v3" + }, + "AWSElementalMediaConvertFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSElementalMediaConvertFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-06-25T19:25:35+00:00", + "DefaultVersionId": "v1", + 
"Document": { + "Statement": [ + { + "Action": [ + "mediaconvert:*", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:ListObjects" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "mediaconvert.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIXDREOCL6LV7RBJWC", + "PolicyName": "AWSElementalMediaConvertFullAccess", + "UpdateDate": "2018-06-25T19:25:35+00:00", + "VersionId": "v1" + }, + "AWSElementalMediaConvertReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AWSElementalMediaConvertReadOnly", + "AttachmentCount": 0, + "CreateDate": "2018-06-25T19:25:14+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "mediaconvert:Get*", + "mediaconvert:List*", + "mediaconvert:DescribeEndpoints", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:ListObjects" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJSXYOBSLJN3JEDO42", + "PolicyName": "AWSElementalMediaConvertReadOnly", + "UpdateDate": "2018-06-25T19:25:14+00:00", + "VersionId": "v1" + }, + "AWSElementalMediaPackageFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSElementalMediaPackageFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-12-29T23:39:52+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": { + "Action": "mediapackage:*", + "Effect": "Allow", + "Resource": "*" + }, + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIYI6IYR3JRFAVNQHC", + "PolicyName": "AWSElementalMediaPackageFullAccess", + "UpdateDate": "2017-12-29T23:39:52+00:00", + "VersionId": "v1" + }, + "AWSElementalMediaPackageReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AWSElementalMediaPackageReadOnly", + "AttachmentCount": 0, + "CreateDate": "2017-12-30T00:04:29+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": { + "Action": [ + "mediapackage:List*", + "mediapackage:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + }, + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ42DVTPUHKXNYZQCO", + "PolicyName": "AWSElementalMediaPackageReadOnly", + "UpdateDate": "2017-12-30T00:04:29+00:00", + "VersionId": "v1" + }, + "AWSElementalMediaStoreFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSElementalMediaStoreFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-03-05T23:15:31+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "mediastore:*" + ], + "Condition": { + "Bool": { + "aws:SecureTransport": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJZFYFW2QXSNK7OH6Y", + "PolicyName": "AWSElementalMediaStoreFullAccess", + "UpdateDate": "2018-03-05T23:15:31+00:00", + "VersionId": "v1" + }, + "AWSElementalMediaStoreReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AWSElementalMediaStoreReadOnly", + "AttachmentCount": 0, + "CreateDate": 
"2018-03-08T19:48:22+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "mediastore:Get*", + "mediastore:List*", + "mediastore:Describe*" + ], + "Condition": { + "Bool": { + "aws:SecureTransport": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI4EFXRATQYOFTAEFM", + "PolicyName": "AWSElementalMediaStoreReadOnly", + "UpdateDate": "2018-03-08T19:48:22+00:00", "VersionId": "v1" }, "AWSEnhancedClassicNetworkingMangementPolicy": { @@ -2754,16 +6814,355 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI7T4V2HZTS72QVO52", "PolicyName": "AWSEnhancedClassicNetworkingMangementPolicy", "UpdateDate": "2017-09-20T17:29:09+00:00", "VersionId": "v1" }, + "AWSFMAdminFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSFMAdminFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-05-09T18:06:18+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "fms:*", + "waf:*", + "waf-regional:*", + "elasticloadbalancing:SetWebACL", + "organizations:DescribeOrganization" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJLAGM5X6WSNPF4EAQ", + "PolicyName": "AWSFMAdminFullAccess", + "UpdateDate": "2018-05-09T18:06:18+00:00", + "VersionId": "v1" + }, + "AWSFMAdminReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSFMAdminReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-05-09T20:07:39+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "fms:Get*", + "fms:List*", + "waf:Get*", + "waf:List*", + "waf-regional:Get*", + "waf-regional:List*", + "organizations:DescribeOrganization" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJA3UKVVBN62QFIKLW", + "PolicyName": "AWSFMAdminReadOnlyAccess", + "UpdateDate": "2018-05-09T20:07:39+00:00", + "VersionId": "v1" + }, + "AWSFMMemberReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSFMMemberReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-05-09T21:05:29+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "fms:GetAdminAccount", + "waf:Get*", + "waf:List*", + "waf-regional:Get*", + "waf-regional:List*", + "organizations:DescribeOrganization" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIB2IVAQ4XXNHHA3DW", + "PolicyName": "AWSFMMemberReadOnlyAccess", + "UpdateDate": "2018-05-09T21:05:29+00:00", + "VersionId": "v1" + }, + "AWSGlobalAcceleratorSLRPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSGlobalAcceleratorSLRPolicy", + "AttachmentCount": 0, + "CreateDate": "2019-04-05T19:39:13+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DeleteNetworkInterface" + ], + "Effect": "Allow", + 
"Resource": "*" + }, + { + "Action": "ec2:DeleteSecurityGroup", + "Condition": { + "StringEquals": { + "ec2:ResourceTag/AWSServiceName": "GlobalAccelerator" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:DescribeSecurityGroups" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "elasticloadbalancing:DescribeLoadBalancers", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateTags", + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:security-group/*", + "arn:aws:ec2:*:*:network-interface/*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4EJ5ZEQR2C", + "PolicyName": "AWSGlobalAcceleratorSLRPolicy", + "UpdateDate": "2019-04-05T19:39:13+00:00", + "VersionId": "v1" + }, "AWSGlueConsoleFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSGlueConsoleFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-09-13T00:12:54+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2017-08-14T13:37:39+00:00", + "DefaultVersionId": "v12", + "Document": { + "Statement": [ + { + "Action": [ + "glue:*", + "redshift:DescribeClusters", + "redshift:DescribeClusterSubnetGroups", + "iam:ListRoles", + "iam:ListUsers", + "iam:ListGroups", + "iam:ListRolePolicies", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:ListAttachedRolePolicies", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeRouteTables", + "ec2:DescribeVpcAttribute", + "ec2:DescribeKeyPairs", + "ec2:DescribeInstances", + "ec2:DescribeImages", + "rds:DescribeDBInstances", + "rds:DescribeDBClusters", + "rds:DescribeDBSubnetGroups", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:GetBucketAcl", + "s3:GetBucketLocation", + "cloudformation:DescribeStacks", + "cloudformation:GetTemplateSummary", + "dynamodb:ListTables", + "kms:ListAliases", + "kms:DescribeKey", + "cloudwatch:GetMetricData", + "cloudwatch:ListDashboards" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:GetObject", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-glue-*/*", + "arn:aws:s3:::*/*aws-glue-*/*", + "arn:aws:s3:::aws-glue-*" + ] + }, + { + "Action": [ + "tag:GetResources" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:CreateBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-glue-*" + ] + }, + { + "Action": [ + "logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:/aws-glue/*" + ] + }, + { + "Action": [ + "cloudformation:CreateStack", + "cloudformation:DeleteStack" + ], + "Effect": "Allow", + "Resource": "arn:aws:cloudformation:*:*:stack/aws-glue*/*" + }, + { + "Action": [ + "ec2:RunInstances" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:instance/*", + "arn:aws:ec2:*:*:key-pair/*", + "arn:aws:ec2:*:*:image/*", + "arn:aws:ec2:*:*:security-group/*", + "arn:aws:ec2:*:*:network-interface/*", + "arn:aws:ec2:*:*:subnet/*", + "arn:aws:ec2:*:*:volume/*" + ] + }, + { + "Action": [ + "ec2:TerminateInstances", + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "StringEquals": { + "ec2:ResourceTag/aws:cloudformation:logical-id": "ZeppelinInstance" + }, + "StringLike": { + "ec2:ResourceTag/aws:cloudformation:stack-id": "arn:aws:cloudformation:*:*:stack/aws-glue-*/*" + } + }, + "Effect": "Allow", + "Resource": [ + 
"arn:aws:ec2:*:*:instance/*" + ] + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "glue.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/AWSGlueServiceRole*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "ec2.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/AWSGlueServiceNotebookRole*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "glue.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/service-role/AWSGlueServiceRole*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJNZGDEOD7MISOVSVI", + "PolicyName": "AWSGlueConsoleFullAccess", + "UpdateDate": "2019-02-11T19:49:01+00:00", + "VersionId": "v12" + }, + "AWSGlueConsoleSageMakerNotebookFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSGlueConsoleSageMakerNotebookFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-10-05T17:52:35+00:00", + "DefaultVersionId": "v1", "Document": { "Statement": [ { @@ -2775,6 +7174,7 @@ aws_managed_policies_data = """ "iam:ListRolePolicies", "iam:GetRole", "iam:GetRolePolicy", + "iam:ListAttachedRolePolicies", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs", @@ -2783,13 +7183,29 @@ aws_managed_policies_data = """ "ec2:DescribeVpcAttribute", "ec2:DescribeKeyPairs", "ec2:DescribeInstances", + "ec2:DescribeImages", + "ec2:CreateNetworkInterface", + "ec2:AttachNetworkInterface", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DeleteNetworkInterface", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeNetworkInterfaces", "rds:DescribeDBInstances", "s3:ListAllMyBuckets", "s3:ListBucket", "s3:GetBucketAcl", "s3:GetBucketLocation", "cloudformation:DescribeStacks", - "cloudformation:GetTemplateSummary" + "cloudformation:GetTemplateSummary", + "dynamodb:ListTables", + "kms:ListAliases", + "kms:DescribeKey", + "sagemaker:ListNotebookInstances", + "sagemaker:ListNotebookInstanceLifecycleConfigs", + "cloudformation:ListStacks", + "cloudwatch:GetMetricData", + "cloudwatch:ListDashboards" ], "Effect": "Allow", "Resource": [ @@ -2834,18 +7250,64 @@ aws_managed_policies_data = """ "Effect": "Allow", "Resource": "arn:aws:cloudformation:*:*:stack/aws-glue*/*" }, + { + "Action": [ + "sagemaker:CreatePresignedNotebookInstanceUrl", + "sagemaker:CreateNotebookInstance", + "sagemaker:DeleteNotebookInstance", + "sagemaker:DescribeNotebookInstance", + "sagemaker:DescribeNotebookInstanceLifecycleConfig", + "sagemaker:DeleteNotebookInstanceLifecycleConfig", + "sagemaker:StartNotebookInstance", + "sagemaker:CreateNotebookInstanceLifecycleConfig", + "sagemaker:StopNotebookInstance", + "sagemaker:UpdateNotebookInstance", + "sagemaker:ListTags" + ], + "Effect": "Allow", + "Resource": "arn:aws:sagemaker:*:*:notebook-instance/aws-glue-*" + }, + { + "Action": [ + "ec2:RunInstances" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:instance/*", + "arn:aws:ec2:*:*:key-pair/*", + "arn:aws:ec2:*:*:image/*", + "arn:aws:ec2:*:*:security-group/*", + "arn:aws:ec2:*:*:network-interface/*", + "arn:aws:ec2:*:*:subnet/*", + "arn:aws:ec2:*:*:volume/*" + ] + }, { "Action": [ "ec2:TerminateInstances", - "ec2:RunInstances", "ec2:CreateTags", "ec2:DeleteTags" ], "Condition": { - 
"ForAllValues:StringEquals": { - "aws:TagKeys": [ - "aws-glue-dev-endpoint" - ] + "StringEquals": { + "ec2:ResourceTag/aws:cloudformation:logical-id": "ZeppelinInstance" + }, + "StringLike": { + "ec2:ResourceTag/aws:cloudformation:stack-id": "arn:aws:cloudformation:*:*:stack/aws-glue-*/*" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:instance/*" + ] + }, + { + "Action": [ + "tag:GetResources" + ], + "Condition": { + "StringEquals": { + "aws:TagKeys": "aws-glue-*" } }, "Effect": "Allow", @@ -2880,6 +7342,36 @@ aws_managed_policies_data = """ }, "Effect": "Allow", "Resource": "arn:aws:iam::*:role/AWSGlueServiceNotebookRole*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "sagemaker.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/AWSGlueServiceSageMakerNotebookRole*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "glue.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/service-role/AWSGlueServiceRole*" + ] } ], "Version": "2012-10-17" @@ -2887,15 +7379,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", - "PolicyId": "ANPAJNZGDEOD7MISOVSVI", - "PolicyName": "AWSGlueConsoleFullAccess", - "UpdateDate": "2017-09-13T00:12:54+00:00", - "VersionId": "v2" + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJELFOHJC42QS3ZSYY", + "PolicyName": "AWSGlueConsoleSageMakerNotebookFullAccess", + "UpdateDate": "2018-10-05T17:52:35+00:00", + "VersionId": "v1" }, "AWSGlueServiceNotebookRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSGlueServiceNotebookRole", "AttachmentCount": 0, - "CreateDate": "2017-08-17T18:08:29+00:00", + "CreateDate": "2017-08-14T13:37:42+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -3000,6 +7493,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIMRC6VZUHJYCTKWFI", "PolicyName": "AWSGlueServiceNotebookRole", "UpdateDate": "2017-08-17T18:08:29+00:00", @@ -3008,8 +7502,8 @@ aws_managed_policies_data = """ "AWSGlueServiceRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole", "AttachmentCount": 0, - "CreateDate": "2017-08-23T21:35:25+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2017-08-14T13:37:21+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -3029,7 +7523,8 @@ aws_managed_policies_data = """ "ec2:DescribeVpcAttribute", "iam:ListRolePolicies", "iam:GetRole", - "iam:GetRolePolicy" + "iam:GetRolePolicy", + "cloudwatch:PutMetricData" ], "Effect": "Allow", "Resource": [ @@ -3103,10 +7598,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIRUJCPEBPMEZFAS32", "PolicyName": "AWSGlueServiceRole", - "UpdateDate": "2017-08-23T21:35:25+00:00", - "VersionId": "v3" + "UpdateDate": "2018-06-25T18:23:09+00:00", + "VersionId": "v4" }, "AWSGreengrassFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSGreengrassFullAccess", @@ -3128,16 +7624,44 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJWPV6OBK4QONH4J3O", "PolicyName": "AWSGreengrassFullAccess", "UpdateDate": "2017-05-03T00:47:37+00:00", "VersionId": "v1" }, + 
"AWSGreengrassReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSGreengrassReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-10-30T16:01:43+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "greengrass:List*", + "greengrass:Get*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJLSKLXFVTQTZ5GY3I", + "PolicyName": "AWSGreengrassReadOnlyAccess", + "UpdateDate": "2018-10-30T16:01:43+00:00", + "VersionId": "v1" + }, "AWSGreengrassResourceAccessRolePolicy": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSGreengrassResourceAccessRolePolicy", "AttachmentCount": 0, - "CreateDate": "2017-05-26T23:10:54+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2017-02-14T21:17:24+00:00", + "DefaultVersionId": "v5", "Document": { "Statement": [ { @@ -3187,6 +7711,47 @@ aws_managed_policies_data = """ "Effect": "Allow", "Resource": "*", "Sid": "AllowGreengrassToGetLambdaFunctions" + }, + { + "Action": [ + "secretsmanager:GetSecretValue" + ], + "Effect": "Allow", + "Resource": "arn:aws:secretsmanager:*:*:secret:greengrass-*", + "Sid": "AllowGreengrassToGetGreengrassSecrets" + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*Greengrass*", + "arn:aws:s3:::*GreenGrass*", + "arn:aws:s3:::*greengrass*", + "arn:aws:s3:::*Sagemaker*", + "arn:aws:s3:::*SageMaker*", + "arn:aws:s3:::*sagemaker*" + ], + "Sid": "AllowGreengrassAccessToS3Objects" + }, + { + "Action": [ + "s3:GetBucketLocation" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "AllowGreengrassAccessToS3BucketLocation" + }, + { + "Action": [ + "sagemaker:DescribeTrainingJob" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:sagemaker:*:*:training-job/*" + ], + "Sid": "AllowGreengrassAccessToSageMakerTrainingJobs" } ], "Version": "2012-10-17" @@ -3194,10 +7759,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJPKEIMB6YMXDEVRTM", "PolicyName": "AWSGreengrassResourceAccessRolePolicy", - "UpdateDate": "2017-05-26T23:10:54+00:00", - "VersionId": "v3" + "UpdateDate": "2018-11-14T00:35:02+00:00", + "VersionId": "v5" }, "AWSHealthFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSHealthFullAccess", @@ -3219,11 +7785,38 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI3CUMPCPEUPCSXC4Y", "PolicyName": "AWSHealthFullAccess", "UpdateDate": "2016-12-06T12:30:31+00:00", "VersionId": "v1" }, + "AWSIQFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIQFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-04-04T23:13:42+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iq:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4P4TAHETXT", + "PolicyName": "AWSIQFullAccess", + "UpdateDate": "2019-04-04T23:13:42+00:00", + "VersionId": "v1" + }, "AWSImportExportFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSImportExportFullAccess", "AttachmentCount": 0, @@ -3244,6 +7837,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + 
"PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJCQCT4JGTLC6722MQ", "PolicyName": "AWSImportExportFullAccess", "UpdateDate": "2015-02-06T18:40:43+00:00", @@ -3270,74 +7864,283 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJNTV4OG52ESYZHCNK", "PolicyName": "AWSImportExportReadOnlyAccess", "UpdateDate": "2015-02-06T18:40:42+00:00", "VersionId": "v1" }, + "AWSIoT1ClickFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoT1ClickFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-05-11T22:10:14+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iot1click:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJPQNJPDUDESCCAMIA", + "PolicyName": "AWSIoT1ClickFullAccess", + "UpdateDate": "2018-05-11T22:10:14+00:00", + "VersionId": "v1" + }, + "AWSIoT1ClickReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoT1ClickReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-05-11T21:49:24+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iot1click:Describe*", + "iot1click:Get*", + "iot1click:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI35VTLD3EBNY2JGXS", + "PolicyName": "AWSIoT1ClickReadOnlyAccess", + "UpdateDate": "2018-05-11T21:49:24+00:00", + "VersionId": "v1" + }, + "AWSIoTAnalyticsFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTAnalyticsFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-06-18T23:02:45+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iotanalytics:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ7FB5ZEKQN445QGKY", + "PolicyName": "AWSIoTAnalyticsFullAccess", + "UpdateDate": "2018-06-18T23:02:45+00:00", + "VersionId": "v1" + }, + "AWSIoTAnalyticsReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTAnalyticsReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-06-18T21:37:49+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iotanalytics:Describe*", + "iotanalytics:List*", + "iotanalytics:Get*", + "iotanalytics:SampleChannelData" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ3Z4LYBELMXGFLGMI", + "PolicyName": "AWSIoTAnalyticsReadOnlyAccess", + "UpdateDate": "2018-06-18T21:37:49+00:00", + "VersionId": "v1" + }, "AWSIoTConfigAccess": { "Arn": "arn:aws:iam::aws:policy/AWSIoTConfigAccess", "AttachmentCount": 0, - "CreateDate": "2016-07-27T20:41:18+00:00", - "DefaultVersionId": "v4", + "CreateDate": "2015-10-27T21:52:07+00:00", + "DefaultVersionId": "v8", "Document": { "Statement": [ { "Action": [ "iot:AcceptCertificateTransfer", + "iot:AddThingToThingGroup", + "iot:AssociateTargetsWithJob", + "iot:AttachPolicy", "iot:AttachPrincipalPolicy", "iot:AttachThingPrincipal", "iot:CancelCertificateTransfer", + "iot:CancelJob", + 
"iot:CancelJobExecution", + "iot:ClearDefaultAuthorizer", + "iot:CreateAuthorizer", "iot:CreateCertificateFromCsr", + "iot:CreateJob", "iot:CreateKeysAndCertificate", + "iot:CreateOTAUpdate", "iot:CreatePolicy", "iot:CreatePolicyVersion", + "iot:CreateRoleAlias", + "iot:CreateStream", "iot:CreateThing", + "iot:CreateThingGroup", "iot:CreateThingType", "iot:CreateTopicRule", - "iot:DeleteCertificate", + "iot:DeleteAuthorizer", "iot:DeleteCACertificate", + "iot:DeleteCertificate", + "iot:DeleteJob", + "iot:DeleteJobExecution", + "iot:DeleteOTAUpdate", "iot:DeletePolicy", "iot:DeletePolicyVersion", "iot:DeleteRegistrationCode", + "iot:DeleteRoleAlias", + "iot:DeleteStream", "iot:DeleteThing", + "iot:DeleteThingGroup", "iot:DeleteThingType", "iot:DeleteTopicRule", + "iot:DeleteV2LoggingLevel", "iot:DeprecateThingType", - "iot:DescribeCertificate", + "iot:DescribeAuthorizer", "iot:DescribeCACertificate", + "iot:DescribeCertificate", + "iot:DescribeCertificateTag", + "iot:DescribeDefaultAuthorizer", "iot:DescribeEndpoint", + "iot:DescribeEventConfigurations", + "iot:DescribeIndex", + "iot:DescribeJob", + "iot:DescribeJobExecution", + "iot:DescribeRoleAlias", + "iot:DescribeStream", "iot:DescribeThing", + "iot:DescribeThingGroup", + "iot:DescribeThingRegistrationTask", "iot:DescribeThingType", + "iot:DetachPolicy", "iot:DetachPrincipalPolicy", "iot:DetachThingPrincipal", + "iot:DisableTopicRule", + "iot:EnableTopicRule", + "iot:GetEffectivePolicies", + "iot:GetIndexingConfiguration", + "iot:GetJobDocument", "iot:GetLoggingOptions", + "iot:GetOTAUpdate", "iot:GetPolicy", "iot:GetPolicyVersion", "iot:GetRegistrationCode", "iot:GetTopicRule", - "iot:ListCertificates", + "iot:GetV2LoggingOptions", + "iot:ListAttachedPolicies", + "iot:ListAuthorizers", "iot:ListCACertificates", + "iot:ListCertificates", "iot:ListCertificatesByCA", + "iot:ListIndices", + "iot:ListJobExecutionsForJob", + "iot:ListJobExecutionsForThing", + "iot:ListJobs", + "iot:ListOTAUpdates", + "iot:ListOutgoingCertificates", "iot:ListPolicies", "iot:ListPolicyPrincipals", "iot:ListPolicyVersions", "iot:ListPrincipalPolicies", "iot:ListPrincipalThings", + "iot:ListRoleAliases", + "iot:ListStreams", + "iot:ListTargetsForPolicy", + "iot:ListThingGroups", + "iot:ListThingGroupsForThing", "iot:ListThingPrincipals", + "iot:ListThingRegistrationTaskReports", + "iot:ListThingRegistrationTasks", "iot:ListThings", + "iot:ListThingsInThingGroup", "iot:ListThingTypes", "iot:ListTopicRules", - "iot:RegisterCertificate", + "iot:ListV2LoggingLevels", "iot:RegisterCACertificate", + "iot:RegisterCertificate", + "iot:RegisterThing", "iot:RejectCertificateTransfer", + "iot:RemoveThingFromThingGroup", "iot:ReplaceTopicRule", + "iot:SearchIndex", + "iot:SetDefaultAuthorizer", "iot:SetDefaultPolicyVersion", "iot:SetLoggingOptions", + "iot:SetV2LoggingLevel", + "iot:SetV2LoggingOptions", + "iot:StartThingRegistrationTask", + "iot:StopThingRegistrationTask", + "iot:TestAuthorization", + "iot:TestInvokeAuthorizer", "iot:TransferCertificate", - "iot:UpdateCertificate", + "iot:UpdateAuthorizer", "iot:UpdateCACertificate", - "iot:UpdateThing" + "iot:UpdateCertificate", + "iot:UpdateCertificateTag", + "iot:UpdateEventConfigurations", + "iot:UpdateIndexingConfiguration", + "iot:UpdateRoleAlias", + "iot:UpdateStream", + "iot:UpdateThing", + "iot:UpdateThingGroup", + "iot:UpdateThingGroupsForThing", + "iot:UpdateAccountAuditConfiguration", + "iot:DescribeAccountAuditConfiguration", + "iot:DeleteAccountAuditConfiguration", + "iot:StartOnDemandAuditTask", + 
"iot:CancelAuditTask", + "iot:DescribeAuditTask", + "iot:ListAuditTasks", + "iot:CreateScheduledAudit", + "iot:UpdateScheduledAudit", + "iot:DeleteScheduledAudit", + "iot:DescribeScheduledAudit", + "iot:ListScheduledAudits", + "iot:ListAuditFindings", + "iot:CreateSecurityProfile", + "iot:DescribeSecurityProfile", + "iot:UpdateSecurityProfile", + "iot:DeleteSecurityProfile", + "iot:AttachSecurityProfile", + "iot:DetachSecurityProfile", + "iot:ListSecurityProfiles", + "iot:ListSecurityProfilesForTarget", + "iot:ListTargetsForSecurityProfile", + "iot:ListActiveViolations", + "iot:ListViolationEvents", + "iot:ValidateSecurityProfileBehaviors" ], "Effect": "Allow", "Resource": "*" @@ -3348,42 +8151,92 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIWWGD4LM4EMXNRL7I", "PolicyName": "AWSIoTConfigAccess", - "UpdateDate": "2016-07-27T20:41:18+00:00", - "VersionId": "v4" + "UpdateDate": "2018-10-01T17:22:32+00:00", + "VersionId": "v8" }, "AWSIoTConfigReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AWSIoTConfigReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2016-07-27T20:41:36+00:00", - "DefaultVersionId": "v4", + "CreateDate": "2015-10-27T21:52:31+00:00", + "DefaultVersionId": "v7", "Document": { "Statement": [ { "Action": [ - "iot:DescribeCertificate", + "iot:DescribeAuthorizer", "iot:DescribeCACertificate", + "iot:DescribeCertificate", + "iot:DescribeCertificateTag", + "iot:DescribeDefaultAuthorizer", "iot:DescribeEndpoint", + "iot:DescribeEventConfigurations", + "iot:DescribeIndex", + "iot:DescribeJob", + "iot:DescribeJobExecution", + "iot:DescribeRoleAlias", + "iot:DescribeStream", "iot:DescribeThing", + "iot:DescribeThingGroup", + "iot:DescribeThingRegistrationTask", "iot:DescribeThingType", + "iot:GetEffectivePolicies", + "iot:GetIndexingConfiguration", + "iot:GetJobDocument", "iot:GetLoggingOptions", + "iot:GetOTAUpdate", "iot:GetPolicy", "iot:GetPolicyVersion", "iot:GetRegistrationCode", "iot:GetTopicRule", + "iot:GetV2LoggingOptions", + "iot:ListAttachedPolicies", + "iot:ListAuthorizers", + "iot:ListCACertificates", "iot:ListCertificates", "iot:ListCertificatesByCA", - "iot:ListCACertificates", + "iot:ListIndices", + "iot:ListJobExecutionsForJob", + "iot:ListJobExecutionsForThing", + "iot:ListJobs", + "iot:ListOTAUpdates", + "iot:ListOutgoingCertificates", "iot:ListPolicies", "iot:ListPolicyPrincipals", "iot:ListPolicyVersions", "iot:ListPrincipalPolicies", "iot:ListPrincipalThings", + "iot:ListRoleAliases", + "iot:ListStreams", + "iot:ListTargetsForPolicy", + "iot:ListThingGroups", + "iot:ListThingGroupsForThing", "iot:ListThingPrincipals", + "iot:ListThingRegistrationTaskReports", + "iot:ListThingRegistrationTasks", "iot:ListThings", + "iot:ListThingsInThingGroup", "iot:ListThingTypes", - "iot:ListTopicRules" + "iot:ListTopicRules", + "iot:ListV2LoggingLevels", + "iot:SearchIndex", + "iot:TestAuthorization", + "iot:TestInvokeAuthorizer", + "iot:DescribeAccountAuditConfiguration", + "iot:DescribeAuditTask", + "iot:ListAuditTasks", + "iot:DescribeScheduledAudit", + "iot:ListScheduledAudits", + "iot:ListAuditFindings", + "iot:DescribeSecurityProfile", + "iot:ListSecurityProfiles", + "iot:ListSecurityProfilesForTarget", + "iot:ListTargetsForSecurityProfile", + "iot:ListActiveViolations", + "iot:ListViolationEvents", + "iot:ValidateSecurityProfileBehaviors" ], "Effect": "Allow", "Resource": "*" @@ -3394,16 +8247,17 @@ aws_managed_policies_data = """ "IsAttachable": true, 
"IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJHENEMXGX4XMFOIOI", "PolicyName": "AWSIoTConfigReadOnlyAccess", - "UpdateDate": "2016-07-27T20:41:36+00:00", - "VersionId": "v4" + "UpdateDate": "2018-07-18T21:22:11+00:00", + "VersionId": "v7" }, "AWSIoTDataAccess": { "Arn": "arn:aws:iam::aws:policy/AWSIoTDataAccess", "AttachmentCount": 0, "CreateDate": "2015-10-27T21:51:18+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -3413,7 +8267,8 @@ aws_managed_policies_data = """ "iot:Subscribe", "iot:Receive", "iot:GetThingShadow", - "iot:UpdateThingShadow" + "iot:UpdateThingShadow", + "iot:DeleteThingShadow" ], "Effect": "Allow", "Resource": "*" @@ -3424,9 +8279,106 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJM2KI2UJDR24XPS2K", "PolicyName": "AWSIoTDataAccess", - "UpdateDate": "2015-10-27T21:51:18+00:00", + "UpdateDate": "2017-11-16T18:24:11+00:00", + "VersionId": "v2" + }, + "AWSIoTDeviceDefenderAudit": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSIoTDeviceDefenderAudit", + "AttachmentCount": 0, + "CreateDate": "2018-07-18T21:17:40+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iot:GetLoggingOptions", + "iot:GetV2LoggingOptions", + "iot:ListCACertificates", + "iot:ListCertificates", + "iot:DescribeCACertificate", + "iot:DescribeCertificate", + "iot:ListPolicies", + "iot:GetPolicy", + "iot:GetEffectivePolicies", + "cognito-identity:GetIdentityPoolRoles", + "iam:ListRolePolicies", + "iam:ListAttachedRolePolicies", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:GetRolePolicy" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJKUN6OAGIHZ66TRKO", + "PolicyName": "AWSIoTDeviceDefenderAudit", + "UpdateDate": "2018-07-18T21:17:40+00:00", + "VersionId": "v1" + }, + "AWSIoTEventsFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTEventsFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-10T22:51:57+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iotevents:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJGA726P7LVUWJZ2LM", + "PolicyName": "AWSIoTEventsFullAccess", + "UpdateDate": "2019-01-10T22:51:57+00:00", + "VersionId": "v1" + }, + "AWSIoTEventsReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTEventsReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-10T22:50:08+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iotevents:Describe*", + "iotevents:List*", + "iotevents:Get*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJYJFNAR7CN5JW52PG", + "PolicyName": "AWSIoTEventsReadOnlyAccess", + "UpdateDate": "2019-01-10T22:50:08+00:00", "VersionId": "v1" }, "AWSIoTFullAccess": { @@ -3449,6 +8401,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": 
"ANPAJU2FPGG6PQWN72V2G", "PolicyName": "AWSIoTFullAccess", "UpdateDate": "2015-10-08T15:19:49+00:00", @@ -3482,25 +8435,22 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI6R6Z2FHHGS454W7W", "PolicyName": "AWSIoTLogging", "UpdateDate": "2015-10-08T15:17:25+00:00", "VersionId": "v1" }, - "AWSIoTRuleActions": { - "Arn": "arn:aws:iam::aws:policy/service-role/AWSIoTRuleActions", + "AWSIoTOTAUpdate": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSIoTOTAUpdate", "AttachmentCount": 0, - "CreateDate": "2015-10-08T15:14:51+00:00", + "CreateDate": "2017-12-20T20:36:53+00:00", "DefaultVersionId": "v1", "Document": { "Statement": { "Action": [ - "dynamodb:PutItem", - "kinesis:PutRecord", - "iot:Publish", - "s3:PutObject", - "sns:Publish", - "sqs:SendMessage*" + "iot:CreateJob", + "signer:DescribeSigningJob" ], "Effect": "Allow", "Resource": "*" @@ -3510,15 +8460,282 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJLJYWX53STBZFPUEY", + "PolicyName": "AWSIoTOTAUpdate", + "UpdateDate": "2017-12-20T20:36:53+00:00", + "VersionId": "v1" + }, + "AWSIoTRuleActions": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSIoTRuleActions", + "AttachmentCount": 0, + "CreateDate": "2015-10-08T15:14:51+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": { + "Action": [ + "dynamodb:PutItem", + "kinesis:PutRecord", + "iot:Publish", + "s3:PutObject", + "sns:Publish", + "sqs:SendMessage*", + "cloudwatch:SetAlarmState", + "cloudwatch:PutMetricData", + "es:ESHttpPut", + "firehose:PutRecord" + ], + "Effect": "Allow", + "Resource": "*" + }, + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJEZ6FS7BUZVUHMOKY", "PolicyName": "AWSIoTRuleActions", - "UpdateDate": "2015-10-08T15:14:51+00:00", + "UpdateDate": "2018-01-16T19:28:19+00:00", + "VersionId": "v2" + }, + "AWSIoTSiteWiseConsoleFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTSiteWiseConsoleFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-05-31T21:37:49+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "iotsitewise:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iotanalytics:List*", + "iotanalytics:Describe*", + "iotanalytics:Create*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iot:DescribeEndpoint", + "iot:GetThingShadow" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "greengrass:GetGroup", + "greengrass:GetGroupVersion", + "greengrass:GetCoreDefinitionVersion", + "greengrass:ListGroups" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "secretsmanager:ListSecrets", + "secretsmanager:CreateSecret" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "secretsmanager:UpdateSecret" + ], + "Effect": "Allow", + "Resource": "arn:aws:secretsmanager:*:*:secret:greengrass-*" + }, + { + "Action": [ + "tag:GetResources" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "iotsitewise.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/iotsitewise.amazonaws.com/AWSServiceRoleForIoTSiteWise*" + }, + { + "Action": [ 
+ "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": "iotsitewise.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/iotsitewise.amazonaws.com/AWSServiceRoleForIoTSiteWise*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4K7KP5VA7F", + "PolicyName": "AWSIoTSiteWiseConsoleFullAccess", + "UpdateDate": "2019-05-31T21:37:49+00:00", + "VersionId": "v1" + }, + "AWSIoTSiteWiseFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTSiteWiseFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-12-04T20:53:39+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iotsitewise:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAILUK3XBM6TZ5Q3PX2", + "PolicyName": "AWSIoTSiteWiseFullAccess", + "UpdateDate": "2018-12-04T20:53:39+00:00", + "VersionId": "v1" + }, + "AWSIoTSiteWiseReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSIoTSiteWiseReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-12-04T20:55:11+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iotsitewise:Describe*", + "iotsitewise:List*", + "iotsitewise:Get*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJLHEAFKME2QL64WKK", + "PolicyName": "AWSIoTSiteWiseReadOnlyAccess", + "UpdateDate": "2018-12-04T20:55:11+00:00", + "VersionId": "v1" + }, + "AWSIoTThingsRegistration": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSIoTThingsRegistration", + "AttachmentCount": 0, + "CreateDate": "2017-12-01T20:21:52+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iot:AddThingToThingGroup", + "iot:AttachPrincipalPolicy", + "iot:AttachThingPrincipal", + "iot:CreateCertificateFromCsr", + "iot:CreatePolicy", + "iot:CreateThing", + "iot:DescribeCertificate", + "iot:DescribeThing", + "iot:DescribeThingGroup", + "iot:DescribeThingType", + "iot:DetachThingPrincipal", + "iot:GetPolicy", + "iot:ListPolicyPrincipals", + "iot:ListPrincipalPolicies", + "iot:ListPrincipalThings", + "iot:ListThingGroupsForThing", + "iot:ListThingPrincipals", + "iot:RegisterCertificate", + "iot:RegisterThing", + "iot:RemoveThingFromThingGroup", + "iot:UpdateCertificate", + "iot:UpdateThing", + "iot:UpdateThingGroupsForThing" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI3YQXTC5XAEVTJNEU", + "PolicyName": "AWSIoTThingsRegistration", + "UpdateDate": "2017-12-01T20:21:52+00:00", + "VersionId": "v1" + }, + "AWSKeyManagementServiceCustomKeyStoresServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSKeyManagementServiceCustomKeyStoresServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-11-14T20:10:53+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudhsm:Describe*", + "ec2:CreateNetworkInterface", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + 
"ec2:DescribeSecurityGroups", + "ec2:RevokeSecurityGroupEgress", + "ec2:DeleteSecurityGroup" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIADMJEHVVYK5AUQOO", + "PolicyName": "AWSKeyManagementServiceCustomKeyStoresServiceRolePolicy", + "UpdateDate": "2018-11-14T20:10:53+00:00", "VersionId": "v1" }, "AWSKeyManagementServicePowerUser": { "Arn": "arn:aws:iam::aws:policy/AWSKeyManagementServicePowerUser", - "AttachmentCount": 1, - "CreateDate": "2017-03-07T00:55:11+00:00", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:40+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -3546,6 +8763,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJNPP7PPPPMJRV2SA4", "PolicyName": "AWSKeyManagementServicePowerUser", "UpdateDate": "2017-03-07T00:55:11+00:00", @@ -3553,7 +8771,7 @@ aws_managed_policies_data = """ }, "AWSLambdaBasicExecutionRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole", - "AttachmentCount": 0, + "AttachmentCount": 2, "CreateDate": "2015-04-09T15:03:43+00:00", "DefaultVersionId": "v1", "Document": { @@ -3573,6 +8791,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJNCQGXC42545SKXIK", "PolicyName": "AWSLambdaBasicExecutionRole", "UpdateDate": "2015-04-09T15:03:43+00:00", @@ -3604,6 +8823,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIP7WNAGMIPYNW4WQG", "PolicyName": "AWSLambdaDynamoDBExecutionRole", "UpdateDate": "2015-04-09T15:09:29+00:00", @@ -3631,6 +8851,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJXAW2Q3KPTURUT2QC", "PolicyName": "AWSLambdaENIManagementAccess", "UpdateDate": "2016-12-06T00:37:27+00:00", @@ -3664,6 +8885,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJE5FX7FQZSU5XAKGO", "PolicyName": "AWSLambdaExecute", "UpdateDate": "2015-02-06T18:40:46+00:00", @@ -3672,55 +8894,64 @@ aws_managed_policies_data = """ "AWSLambdaFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSLambdaFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-05-25T19:08:45+00:00", - "DefaultVersionId": "v7", + "CreateDate": "2015-02-06T18:40:45+00:00", + "DefaultVersionId": "v8", "Document": { "Statement": [ { "Action": [ + "cloudformation:DescribeChangeSet", + "cloudformation:DescribeStackResources", + "cloudformation:DescribeStacks", + "cloudformation:GetTemplate", + "cloudformation:ListStackResources", "cloudwatch:*", "cognito-identity:ListIdentityPools", "cognito-sync:GetCognitoEvents", "cognito-sync:SetCognitoEvents", "dynamodb:*", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", "events:*", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:GetRole", + "iam:GetRolePolicy", "iam:ListAttachedRolePolicies", "iam:ListRolePolicies", "iam:ListRoles", "iam:PassRole", + "iot:AttachPrincipalPolicy", + "iot:AttachThingPrincipal", + "iot:CreateKeysAndCertificate", + "iot:CreatePolicy", + 
"iot:CreateThing", + "iot:CreateTopicRule", + "iot:DescribeEndpoint", + "iot:GetTopicRule", + "iot:ListPolicies", + "iot:ListThings", + "iot:ListTopicRules", + "iot:ReplaceTopicRule", "kinesis:DescribeStream", "kinesis:ListStreams", "kinesis:PutRecord", + "kms:ListAliases", "lambda:*", "logs:*", "s3:*", "sns:ListSubscriptions", "sns:ListSubscriptionsByTopic", "sns:ListTopics", + "sns:Publish", "sns:Subscribe", "sns:Unsubscribe", - "sns:Publish", "sqs:ListQueues", "sqs:SendMessage", "tag:GetResources", - "kms:ListAliases", - "ec2:DescribeVpcs", - "ec2:DescribeSubnets", - "ec2:DescribeSecurityGroups", - "iot:GetTopicRule", - "iot:ListTopicRules", - "iot:CreateTopicRule", - "iot:ReplaceTopicRule", - "iot:AttachPrincipalPolicy", - "iot:AttachThingPrincipal", - "iot:CreateKeysAndCertificate", - "iot:CreatePolicy", - "iot:CreateThing", - "iot:ListPolicies", - "iot:ListThings", - "iot:DescribeEndpoint", - "xray:PutTraceSegments", - "xray:PutTelemetryRecords" + "xray:PutTelemetryRecords", + "xray:PutTraceSegments" ], "Effect": "Allow", "Resource": "*" @@ -3731,10 +8962,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI6E2CYYMI4XI7AA5K", "PolicyName": "AWSLambdaFullAccess", - "UpdateDate": "2017-05-25T19:08:45+00:00", - "VersionId": "v7" + "UpdateDate": "2017-11-27T23:22:38+00:00", + "VersionId": "v8" }, "AWSLambdaInvocation-DynamoDB": { "Arn": "arn:aws:iam::aws:policy/AWSLambdaInvocation-DynamoDB", @@ -3766,6 +8998,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJTHQ3EKCQALQDYG5G", "PolicyName": "AWSLambdaInvocation-DynamoDB", "UpdateDate": "2015-02-06T18:40:47+00:00", @@ -3775,15 +9008,18 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole", "AttachmentCount": 0, "CreateDate": "2015-04-09T15:14:16+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ "kinesis:DescribeStream", + "kinesis:DescribeStreamSummary", "kinesis:GetRecords", "kinesis:GetShardIterator", + "kinesis:ListShards", "kinesis:ListStreams", + "kinesis:SubscribeToShard", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents" @@ -3797,20 +9033,26 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJHOLKJPXV4GBRMJUQ", "PolicyName": "AWSLambdaKinesisExecutionRole", - "UpdateDate": "2015-04-09T15:14:16+00:00", - "VersionId": "v1" + "UpdateDate": "2018-11-19T20:09:24+00:00", + "VersionId": "v2" }, "AWSLambdaReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AWSLambdaReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-05-04T18:22:29+00:00", - "DefaultVersionId": "v6", + "CreateDate": "2015-02-06T18:40:44+00:00", + "DefaultVersionId": "v8", "Document": { "Statement": [ { "Action": [ + "cloudformation:DescribeChangeSet", + "cloudformation:DescribeStackResources", + "cloudformation:DescribeStacks", + "cloudformation:GetTemplate", + "cloudformation:ListStackResources", "cloudwatch:Describe*", "cloudwatch:Get*", "cloudwatch:List*", @@ -3824,33 +9066,39 @@ aws_managed_policies_data = """ "dynamodb:ListTables", "dynamodb:Query", "dynamodb:Scan", - "events:List*", - "events:Describe*", - "iam:ListRoles", - "kinesis:DescribeStream", - "kinesis:ListStreams", - "lambda:List*", - "lambda:Get*", - 
"logs:DescribeMetricFilters", - "logs:GetLogEvents", - "logs:DescribeLogGroups", - "logs:DescribeLogStreams", - "s3:Get*", - "s3:List*", - "sns:ListTopics", - "sns:ListSubscriptions", - "sns:ListSubscriptionsByTopic", - "sqs:ListQueues", - "tag:GetResources", - "kms:ListAliases", - "ec2:DescribeVpcs", - "ec2:DescribeSubnets", "ec2:DescribeSecurityGroups", - "iot:GetTopicRules", - "iot:ListTopicRules", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "events:Describe*", + "events:List*", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:GetRole", + "iam:GetRolePolicy", + "iam:ListAttachedRolePolicies", + "iam:ListRolePolicies", + "iam:ListRoles", + "iot:DescribeEndpoint", + "iot:GetTopicRule", "iot:ListPolicies", "iot:ListThings", - "iot:DescribeEndpoint" + "iot:ListTopicRules", + "kinesis:DescribeStream", + "kinesis:ListStreams", + "kms:ListAliases", + "lambda:Get*", + "lambda:List*", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:DescribeMetricFilters", + "logs:GetLogEvents", + "s3:Get*", + "s3:List*", + "sns:ListSubscriptions", + "sns:ListSubscriptionsByTopic", + "sns:ListTopics", + "sqs:ListQueues", + "tag:GetResources" ], "Effect": "Allow", "Resource": "*" @@ -3861,10 +9109,67 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJLDG7J3CGUHFN4YN6", "PolicyName": "AWSLambdaReadOnlyAccess", - "UpdateDate": "2017-05-04T18:22:29+00:00", - "VersionId": "v6" + "UpdateDate": "2018-09-06T18:04:54+00:00", + "VersionId": "v8" + }, + "AWSLambdaReplicator": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSLambdaReplicator", + "AttachmentCount": 0, + "CreateDate": "2017-05-23T17:53:03+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "lambda:CreateFunction", + "lambda:DeleteFunction", + "lambda:DisableReplication" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:lambda:*:*:function:*" + ], + "Sid": "LambdaCreateDeletePermission" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLikeIfExists": { + "iam:PassedToService": "lambda.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "IamPassRolePermission" + }, + { + "Action": [ + "cloudfront:ListDistributionsByLambdaFunction" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "CloudFrontListDistributions" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIIQFXZNNLL3E2HKTG", + "PolicyName": "AWSLambdaReplicator", + "UpdateDate": "2017-12-08T00:17:54+00:00", + "VersionId": "v3" }, "AWSLambdaRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaRole", @@ -3888,11 +9193,43 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJX4DPCRGTC4NFDUXI", "PolicyName": "AWSLambdaRole", "UpdateDate": "2015-02-06T18:41:28+00:00", "VersionId": "v1" }, + "AWSLambdaSQSQueueExecutionRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaSQSQueueExecutionRole", + "AttachmentCount": 0, + "CreateDate": "2018-06-14T21:50:45+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sqs:ReceiveMessage", + "sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": 
"*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJFWJZI6JNND4TSELK", + "PolicyName": "AWSLambdaSQSQueueExecutionRole", + "UpdateDate": "2018-06-14T21:50:45+00:00", + "VersionId": "v1" + }, "AWSLambdaVPCAccessExecutionRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole", "AttachmentCount": 0, @@ -3918,16 +9255,322 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJVTME3YLVNL72YR2K", "PolicyName": "AWSLambdaVPCAccessExecutionRole", "UpdateDate": "2016-02-11T23:15:26+00:00", "VersionId": "v1" }, + "AWSLicenseManagerMasterAccountRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSLicenseManagerMasterAccountRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-11-26T19:03:51+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:GetBucketLocation", + "s3:ListBucket", + "s3:GetLifecycleConfiguration", + "s3:PutLifecycleConfiguration", + "s3:GetBucketPolicy", + "s3:PutBucketPolicy" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-license-manager-service-*" + ], + "Sid": "S3BucketPermissions" + }, + { + "Action": [ + "s3:AbortMultipartUpload", + "s3:PutObject", + "s3:GetObject", + "s3:ListBucketMultipartUploads", + "s3:ListMultipartUploadParts" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-license-manager-service-*" + ], + "Sid": "S3ObjectPermissions1" + }, + { + "Action": [ + "s3:DeleteObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-license-manager-service-*/resource_sync/*" + ], + "Sid": "S3ObjectPermissions2" + }, + { + "Action": [ + "athena:GetQueryExecution", + "athena:GetQueryResults", + "athena:StartQueryExecution" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "AthenaPermissions" + }, + { + "Action": [ + "glue:GetTable", + "glue:GetPartition", + "glue:GetPartitions" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "GluePermissions" + }, + { + "Action": [ + "organizations:DescribeOrganization", + "organizations:ListAccounts", + "organizations:DescribeAccount", + "organizations:ListChildren", + "organizations:ListParents", + "organizations:ListAccountsForParent", + "organizations:ListRoots", + "organizations:ListAWSServiceAccessForOrganization" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "OrganizationPermissions" + }, + { + "Action": [ + "ram:GetResourceShares", + "ram:GetResourceShareAssociations", + "ram:TagResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "RAMPermissions1" + }, + { + "Action": [ + "ram:CreateResourceShare" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/Service": "LicenseManager" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "RAMPermissions2" + }, + { + "Action": [ + "ram:AssociateResourceShare", + "ram:DisassociateResourceShare", + "ram:UpdateResourceShare", + "ram:DeleteResourceShare" + ], + "Condition": { + "StringEquals": { + "ram:ResourceTag/Service": "LicenseManager" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "RAMPermissions3" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIJE2NOZW2BDEHYUH2", + "PolicyName": 
"AWSLicenseManagerMasterAccountRolePolicy", + "UpdateDate": "2018-11-26T19:03:51+00:00", + "VersionId": "v1" + }, + "AWSLicenseManagerMemberAccountRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSLicenseManagerMemberAccountRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-11-26T19:04:32+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "license-manager:UpdateLicenseSpecificationsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "LicenseManagerPermissions" + }, + { + "Action": [ + "ssm:ListInventoryEntries", + "ssm:GetInventory", + "ssm:CreateAssociation", + "ssm:CreateResourceDataSync", + "ssm:DeleteResourceDataSync", + "ssm:ListResourceDataSync", + "ssm:ListAssociations" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "SSMPermissions" + }, + { + "Action": [ + "ram:AcceptResourceShareInvitation", + "ram:GetResourceShareInvitations" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "RAMPermissions" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJZTYEY2LEGBYAVUY4", + "PolicyName": "AWSLicenseManagerMemberAccountRolePolicy", + "UpdateDate": "2018-11-26T19:04:32+00:00", + "VersionId": "v1" + }, + "AWSLicenseManagerServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSLicenseManagerServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-11-26T19:02:53+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:GetBucketLocation", + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-license-manager-service-*" + ], + "Sid": "S3BucketPermissions1" + }, + { + "Action": [ + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "S3BucketPermissions2" + }, + { + "Action": [ + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::aws-license-manager-service-*" + ], + "Sid": "S3ObjectPermissions" + }, + { + "Action": [ + "sns:Publish" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:sns:*:*:aws-license-manager-service-*" + ], + "Sid": "SNSAccountPermissions" + }, + { + "Action": [ + "sns:ListTopics" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "SNSTopicPermissions" + }, + { + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeImages", + "ec2:DescribeHosts" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "EC2Permissions" + }, + { + "Action": [ + "ssm:ListInventoryEntries", + "ssm:GetInventory", + "ssm:CreateAssociation" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "SSMPermissions" + }, + { + "Action": [ + "organizations:ListAWSServiceAccessForOrganization", + "organizations:DescribeOrganization" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "OrganizationPermissions" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIM7JPETWHTYNBQSZE", + "PolicyName": "AWSLicenseManagerServiceRolePolicy", + "UpdateDate": "2018-11-26T19:02:53+00:00", + "VersionId": "v1" + }, "AWSMarketplaceFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceFullAccess", "AttachmentCount": 0, "CreateDate": "2015-02-11T17:21:45+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -3960,6 +9603,67 @@ 
aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": [ + "ec2:CopyImage", + "ec2:DeregisterImage", + "ec2:DescribeSnapshots", + "ec2:DeleteSnapshot", + "ec2:CreateImage", + "ec2:DescribeInstanceStatus", + "ssm:GetAutomationExecution", + "ssm:UpdateDocumentDefaultVersion", + "ssm:CreateDocument", + "ssm:StartAutomationExecution", + "ssm:ListDocuments", + "ssm:UpdateDocument", + "ssm:DescribeDocument", + "sns:ListTopics", + "sns:GetTopicAttributes", + "sns:CreateTopic", + "iam:GetRole", + "iam:GetInstanceProfile", + "iam:ListRoles", + "iam:ListInstanceProfiles" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:ListBucket", + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*image-build*" + ] + }, + { + "Action": [ + "sns:Publish", + "sns:setTopicAttributes" + ], + "Effect": "Allow", + "Resource": "arn:aws:sns:*:*:*image-build*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "ec2.amazonaws.com", + "ssm.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] } ], "Version": "2012-10-17" @@ -3967,10 +9671,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI2DV5ULJSO2FYVPYG", "PolicyName": "AWSMarketplaceFullAccess", - "UpdateDate": "2015-02-11T17:21:45+00:00", - "VersionId": "v1" + "UpdateDate": "2018-08-08T21:13:02+00:00", + "VersionId": "v3" }, "AWSMarketplaceGetEntitlements": { "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceGetEntitlements", @@ -3992,11 +9697,121 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJLPIMQE4WMHDC2K7C", "PolicyName": "AWSMarketplaceGetEntitlements", "UpdateDate": "2017-03-27T19:37:24+00:00", "VersionId": "v1" }, + "AWSMarketplaceImageBuildFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceImageBuildFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-07-31T23:29:49+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "aws-marketplace:ListBuilds", + "aws-marketplace:StartBuild", + "aws-marketplace:DescribeBuilds" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:TerminateInstances", + "Condition": { + "StringLike": { + "ec2:ResourceTag/marketplace-image-build:build-id": "*" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringEquals": { + "iam:PassedToService": [ + "ec2.amazonaws.com", + "ssm.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/*Automation*", + "arn:aws:iam::*:role/*Instance*" + ] + }, + { + "Action": [ + "ssm:GetAutomationExecution", + "ssm:CreateDocument", + "ssm:StartAutomationExecution", + "ssm:ListDocuments", + "ssm:UpdateDocument", + "ssm:UpdateDocumentDefaultVersion", + "ssm:DescribeDocument", + "ec2:DeregisterImage", + "ec2:CopyImage", + "ec2:DescribeSnapshots", + "ec2:DescribeSecurityGroups", + "ec2:DescribeImages", + "ec2:DescribeSubnets", + "ec2:DeleteSnapshot", + "ec2:CreateImage", + "ec2:RunInstances", + "ec2:DescribeInstanceStatus", + "sns:GetTopicAttributes", + "iam:GetRole", + "iam:GetInstanceProfile" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:GetObject", + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*image-build*" + ] + }, + { + "Action": [ + 
"ec2:CreateTags" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*::image/*", + "arn:aws:ec2:*:*:instance/*" + ] + }, + { + "Action": [ + "sns:Publish" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:sns:*:*:*image-build*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI4QBMJWC3BNHBHN6I", + "PolicyName": "AWSMarketplaceImageBuildFullAccess", + "UpdateDate": "2018-08-08T21:11:59+00:00", + "VersionId": "v2" + }, "AWSMarketplaceManageSubscriptions": { "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceManageSubscriptions", "AttachmentCount": 0, @@ -4019,6 +9834,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJRDW2WIFN7QLUAKBQ", "PolicyName": "AWSMarketplaceManageSubscriptions", "UpdateDate": "2015-02-06T18:40:32+00:00", @@ -4044,6 +9860,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ65YJPG7CC7LDXNA6", "PolicyName": "AWSMarketplaceMeteringFullAccess", "UpdateDate": "2016-03-17T22:39:22+00:00", @@ -4053,7 +9870,7 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AWSMarketplaceRead-only", "AttachmentCount": 0, "CreateDate": "2015-02-06T18:40:31+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -4070,6 +9887,18 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": [ + "aws-marketplace:ListBuilds", + "aws-marketplace:DescribeBuilds", + "iam:ListRoles", + "iam:ListInstanceProfiles", + "sns:GetTopicAttributes", + "sns:ListTopics" + ], + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -4077,10 +9906,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJOOM6LETKURTJ3XZ2", "PolicyName": "AWSMarketplaceRead-only", - "UpdateDate": "2015-02-06T18:40:31+00:00", - "VersionId": "v1" + "UpdateDate": "2018-07-31T23:24:24+00:00", + "VersionId": "v2" }, "AWSMigrationHubDMSAccess": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSMigrationHubDMSAccess", @@ -4127,6 +9957,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIUQB56VA4JHLN7G2W", "PolicyName": "AWSMigrationHubDMSAccess", "UpdateDate": "2017-08-14T14:00:06+00:00", @@ -4155,6 +9986,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAITRMRLSV7JAL6YIGG", "PolicyName": "AWSMigrationHubDiscoveryAccess", "UpdateDate": "2017-08-14T13:30:51+00:00", @@ -4163,8 +9995,8 @@ aws_managed_policies_data = """ "AWSMigrationHubFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSMigrationHubFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-14T14:09:27+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2017-08-14T14:02:54+00:00", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -4181,6 +10013,24 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "continuousexport.discovery.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": 
"arn:aws:iam::*:role/aws-service-role/continuousexport.discovery.amazonaws.com/AWSServiceRoleForApplicationDiscoveryServiceContinuousExport*" + }, + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/continuousexport.discovery.amazonaws.com/AWSServiceRoleForApplicationDiscoveryServiceContinuousExport*" } ], "Version": "2012-10-17" @@ -4188,10 +10038,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ4A2SZKHUYHDYIGOK", "PolicyName": "AWSMigrationHubFullAccess", - "UpdateDate": "2017-08-14T14:09:27+00:00", - "VersionId": "v2" + "UpdateDate": "2018-08-16T20:29:37+00:00", + "VersionId": "v3" }, "AWSMigrationHubSMSAccess": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSMigrationHubSMSAccess", @@ -4238,6 +10089,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIWQYYT6TSVIRJO4TY", "PolicyName": "AWSMigrationHubSMSAccess", "UpdateDate": "2017-08-14T13:57:54+00:00", @@ -4246,8 +10098,8 @@ aws_managed_policies_data = """ "AWSMobileHub_FullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSMobileHub_FullAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-10T22:23:47+00:00", - "DefaultVersionId": "v10", + "CreateDate": "2016-01-05T19:56:01+00:00", + "DefaultVersionId": "v13", "Document": { "Statement": [ { @@ -4257,6 +10109,15 @@ aws_managed_policies_data = """ "apigateway:GetResources", "apigateway:POST", "apigateway:TestInvokeMethod", + "cloudfront:GetDistribution", + "devicefarm:CreateProject", + "devicefarm:ListJobs", + "devicefarm:ListRuns", + "devicefarm:GetProject", + "devicefarm:GetRun", + "devicefarm:ListArtifacts", + "devicefarm:ListProjects", + "devicefarm:ScheduleRun", "dynamodb:DescribeTable", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", @@ -4281,6 +10142,8 @@ aws_managed_policies_data = """ "mobilehub:GenerateProjectParameters", "mobilehub:GetProject", "mobilehub:GetProjectSnapshot", + "mobilehub:ListProjectSnapshots", + "mobilehub:DeleteProjectSnapshot", "mobilehub:ListAvailableConnectors", "mobilehub:ListAvailableFeatures", "mobilehub:ListAvailableRegions", @@ -4300,6 +10163,20 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "arn:aws:s3:::*/aws-my-sample-app*.zip" + }, + { + "Action": [ + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::*-mobilehub-*/*" + }, + { + "Action": [ + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::*-mobilehub-*" } ], "Version": "2012-10-17" @@ -4307,16 +10184,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIJLU43R6AGRBK76DM", "PolicyName": "AWSMobileHub_FullAccess", - "UpdateDate": "2017-08-10T22:23:47+00:00", - "VersionId": "v10" + "UpdateDate": "2018-02-05T23:44:29+00:00", + "VersionId": "v13" }, "AWSMobileHub_ReadOnly": { "Arn": "arn:aws:iam::aws:policy/AWSMobileHub_ReadOnly", "AttachmentCount": 0, - "CreateDate": "2017-08-10T22:08:23+00:00", - "DefaultVersionId": "v8", + "CreateDate": "2016-01-05T19:55:48+00:00", + "DefaultVersionId": "v10", "Document": { "Statement": [ { @@ -4336,7 +10214,9 @@ aws_managed_policies_data = """ "mobilehub:ExportProject", "mobilehub:GenerateProjectParameters", "mobilehub:GetProject", + 
"mobilehub:SynchronizeProject", "mobilehub:GetProjectSnapshot", + "mobilehub:ListProjectSnapshots", "mobilehub:ListAvailableConnectors", "mobilehub:ListAvailableFeatures", "mobilehub:ListAvailableRegions", @@ -4363,280 +10243,29 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIBXVYVL3PWQFBZFGW", "PolicyName": "AWSMobileHub_ReadOnly", - "UpdateDate": "2017-08-10T22:08:23+00:00", - "VersionId": "v8" + "UpdateDate": "2018-07-23T21:59:05+00:00", + "VersionId": "v10" }, - "AWSMobileHub_ServiceUseOnly": { - "Arn": "arn:aws:iam::aws:policy/service-role/AWSMobileHub_ServiceUseOnly", + "AWSOpsWorksCMInstanceProfileRole": { + "Arn": "arn:aws:iam::aws:policy/AWSOpsWorksCMInstanceProfileRole", "AttachmentCount": 0, - "CreateDate": "2017-06-02T23:35:49+00:00", - "DefaultVersionId": "v23", + "CreateDate": "2016-11-24T09:48:22+00:00", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ - "cloudformation:CreateUploadBucket", - "cloudformation:ValidateTemplate", - "cloudfront:CreateDistribution", - "cloudfront:DeleteDistribution", - "cloudfront:GetDistribution", - "cloudfront:GetDistributionConfig", - "cloudfront:UpdateDistribution", - "cognito-identity:CreateIdentityPool", - "cognito-identity:UpdateIdentityPool", - "cognito-identity:DeleteIdentityPool", - "cognito-identity:SetIdentityPoolRoles", - "cognito-idp:CreateUserPool", - "dynamodb:CreateTable", - "dynamodb:DeleteTable", - "dynamodb:DescribeTable", - "dynamodb:UpdateTable", - "iam:AddClientIDToOpenIDConnectProvider", - "iam:CreateOpenIDConnectProvider", - "iam:GetOpenIDConnectProvider", - "iam:ListOpenIDConnectProviders", - "iam:CreateSAMLProvider", - "iam:GetSAMLProvider", - "iam:ListSAMLProvider", - "iam:UpdateSAMLProvider", - "lambda:CreateFunction", - "lambda:DeleteFunction", - "lambda:GetFunction", - "mobileanalytics:CreateApp", - "mobileanalytics:DeleteApp", - "sns:CreateTopic", - "sns:DeleteTopic", - "sns:ListPlatformApplications", - "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets", - "ec2:DescribeVpcs", - "lex:PutIntent", - "lex:GetIntent", - "lex:GetIntents", - "lex:PutSlotType", - "lex:GetSlotType", - "lex:GetSlotTypes", - "lex:PutBot", - "lex:GetBot", - "lex:GetBots", - "lex:GetBotAlias", - "lex:GetBotAliases" + "cloudformation:DescribeStackResource", + "cloudformation:SignalResource" ], "Effect": "Allow", "Resource": [ "*" ] }, - { - "Action": [ - "sns:CreatePlatformApplication", - "sns:DeletePlatformApplication", - "sns:GetPlatformApplicationAttributes", - "sns:SetPlatformApplicationAttributes" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:sns:*:*:app/*_MOBILEHUB_*" - ] - }, - { - "Action": [ - "s3:CreateBucket", - "s3:DeleteBucket", - "s3:DeleteBucketPolicy", - "s3:DeleteBucketWebsite", - "s3:ListBucket", - "s3:ListBucketVersions", - "s3:GetBucketLocation", - "s3:GetBucketVersioning", - "s3:PutBucketVersioning", - "s3:PutBucketWebsite", - "s3:PutBucketPolicy", - "s3:SetBucketCrossOriginConfiguration" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:s3:::*-userfiles-mobilehub-*", - "arn:aws:s3:::*-contentdelivery-mobilehub-*", - "arn:aws:s3:::*-hosting-mobilehub-*", - "arn:aws:s3:::*-deployments-mobilehub-*" - ] - }, - { - "Action": [ - "s3:DeleteObject", - "s3:DeleteVersion", - "s3:DeleteObjectVersion", - "s3:GetObject", - "s3:GetObjectVersion", - "s3:PutObject", - "s3:PutObjectAcl" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:s3:::*-userfiles-mobilehub-*/*", - 
"arn:aws:s3:::*-contentdelivery-mobilehub-*/*", - "arn:aws:s3:::*-hosting-mobilehub-*/*", - "arn:aws:s3:::*-deployments-mobilehub-*/*" - ] - }, - { - "Action": [ - "lambda:AddPermission", - "lambda:CreateAlias", - "lambda:DeleteAlias", - "lambda:UpdateAlias", - "lambda:GetFunctionConfiguration", - "lambda:GetPolicy", - "lambda:RemovePermission", - "lambda:UpdateFunctionCode", - "lambda:UpdateFunctionConfiguration" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:lambda:*:*:function:*-mobilehub-*" - ] - }, - { - "Action": [ - "iam:CreateRole", - "iam:DeleteRole", - "iam:DeleteRolePolicy", - "iam:GetRole", - "iam:GetRolePolicy", - "iam:ListRolePolicies", - "iam:PassRole", - "iam:PutRolePolicy", - "iam:UpdateAssumeRolePolicy", - "iam:AttachRolePolicy", - "iam:DetachRolePolicy" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:iam::*:role/*_unauth_MOBILEHUB_*", - "arn:aws:iam::*:role/*_auth_MOBILEHUB_*", - "arn:aws:iam::*:role/*_consolepush_MOBILEHUB_*", - "arn:aws:iam::*:role/*_lambdaexecutionrole_MOBILEHUB_*", - "arn:aws:iam::*:role/*_smsverification_MOBILEHUB_*", - "arn:aws:iam::*:role/*_botexecutionrole_MOBILEHUB_*", - "arn:aws:iam::*:role/pinpoint-events", - "arn:aws:iam::*:role/MOBILEHUB-*-lambdaexecution*", - "arn:aws:iam::*:role/MobileHub_Service_Role" - ] - }, - { - "Action": [ - "iam:CreateServiceLinkedRole", - "iam:GetRole" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots" - ] - }, - { - "Action": [ - "logs:CreateLogGroup", - "logs:CreateLogStream", - "logs:PutLogEvents" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:logs:*:*:log-group:/aws/mobilehub/*:log-stream:*" - ] - }, - { - "Action": [ - "iam:ListAttachedRolePolicies" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:iam::*:role/MobileHub_Service_Role" - ] - }, - { - "Action": [ - "cloudformation:CreateStack", - "cloudformation:DeleteStack", - "cloudformation:DescribeStacks", - "cloudformation:DescribeStackEvents", - "cloudformation:DescribeStackResource", - "cloudformation:GetTemplate", - "cloudformation:ListStackResources", - "cloudformation:ListStacks", - "cloudformation:UpdateStack" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:cloudformation:*:*:stack/MOBILEHUB-*" - ] - }, - { - "Action": [ - "apigateway:DELETE", - "apigateway:GET", - "apigateway:HEAD", - "apigateway:OPTIONS", - "apigateway:PATCH", - "apigateway:POST", - "apigateway:PUT" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:apigateway:*::/restapis*" - ] - }, - { - "Action": [ - "cognito-idp:DeleteUserPool", - "cognito-idp:DescribeUserPool", - "cognito-idp:CreateUserPoolClient", - "cognito-idp:DescribeUserPoolClient", - "cognito-idp:DeleteUserPoolClient" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:cognito-idp:*:*:userpool/*" - ] - }, - { - "Action": [ - "mobiletargeting:UpdateApnsChannel", - "mobiletargeting:UpdateApnsSandboxChannel", - "mobiletargeting:UpdateEmailChannel", - "mobiletargeting:UpdateGcmChannel", - "mobiletargeting:UpdateSmsChannel", - "mobiletargeting:DeleteApnsChannel", - "mobiletargeting:DeleteApnsSandboxChannel", - "mobiletargeting:DeleteEmailChannel", - "mobiletargeting:DeleteGcmChannel", - "mobiletargeting:DeleteSmsChannel" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:mobiletargeting:*:*:apps/*/channels/*" - ] - } - ], - "Version": "2012-10-17" - }, - "IsAttachable": true, - "IsDefaultVersion": true, - "Path": "/service-role/", - "PolicyId": "ANPAIUHPQXBDZUWOP3PSK", - "PolicyName": "AWSMobileHub_ServiceUseOnly", - 
"UpdateDate": "2017-06-02T23:35:49+00:00", - "VersionId": "v23" - }, - "AWSOpsWorksCMInstanceProfileRole": { - "Arn": "arn:aws:iam::aws:policy/AWSOpsWorksCMInstanceProfileRole", - "AttachmentCount": 0, - "CreateDate": "2016-11-24T09:48:22+00:00", - "DefaultVersionId": "v1", - "Document": { - "Statement": [ { "Action": [ "s3:AbortMultipartUpload", @@ -4656,16 +10285,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAICSU3OSHCURP2WIZW", "PolicyName": "AWSOpsWorksCMInstanceProfileRole", - "UpdateDate": "2016-11-24T09:48:22+00:00", - "VersionId": "v1" + "UpdateDate": "2017-11-03T12:01:32+00:00", + "VersionId": "v2" }, "AWSOpsWorksCMServiceRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSOpsWorksCMServiceRole", "AttachmentCount": 0, - "CreateDate": "2017-04-03T12:00:07+00:00", - "DefaultVersionId": "v6", + "CreateDate": "2016-11-24T09:49:46+00:00", + "DefaultVersionId": "v8", "Document": { "Statement": [ { @@ -4677,7 +10307,8 @@ aws_managed_policies_data = """ "s3:HeadBucket", "s3:ListBucket", "s3:ListObjects", - "s3:PutBucketPolicy" + "s3:PutBucketPolicy", + "s3:PutObject" ], "Effect": "Allow", "Resource": [ @@ -4752,7 +10383,8 @@ aws_managed_policies_data = """ }, { "Action": [ - "ec2:TerminateInstances" + "ec2:TerminateInstances", + "ec2:RebootInstances" ], "Condition": { "StringLike": { @@ -4764,6 +10396,16 @@ aws_managed_policies_data = """ "*" ] }, + { + "Action": [ + "opsworks-cm:DeleteServer", + "opsworks-cm:StartMaintenance" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:opsworks-cm:*:*:server/*" + ] + }, { "Action": [ "cloudformation:CreateStack", @@ -4794,10 +10436,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ6I6MPGJE62URSHCO", "PolicyName": "AWSOpsWorksCMServiceRole", - "UpdateDate": "2017-04-03T12:00:07+00:00", - "VersionId": "v6" + "UpdateDate": "2019-02-21T15:15:07+00:00", + "VersionId": "v8" }, "AWSOpsWorksCloudWatchLogs": { "Arn": "arn:aws:iam::aws:policy/AWSOpsWorksCloudWatchLogs", @@ -4824,6 +10467,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJXFIK7WABAY5CPXM4", "PolicyName": "AWSOpsWorksCloudWatchLogs", "UpdateDate": "2017-03-30T17:47:19+00:00", @@ -4864,6 +10508,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAICN26VXMXASXKOQCG", "PolicyName": "AWSOpsWorksFullAccess", "UpdateDate": "2015-02-06T18:40:48+00:00", @@ -4893,6 +10538,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJG3LCPVNI4WDZCIMU", "PolicyName": "AWSOpsWorksInstanceRegistration", "UpdateDate": "2016-06-03T14:23:15+00:00", @@ -4951,6 +10597,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ3AB5ZBFPCQGTVDU4", "PolicyName": "AWSOpsWorksRegisterCLI", "UpdateDate": "2015-02-06T18:40:49+00:00", @@ -4994,11 +10641,164 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIDUTMOKHJFAPJV45W", "PolicyName": "AWSOpsWorksRole", "UpdateDate": 
"2015-02-06T18:41:27+00:00", "VersionId": "v1" }, + "AWSOrganizationsFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSOrganizationsFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-06T20:31:57+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "organizations:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJZXBNRCJKNLQHSB5M", + "PolicyName": "AWSOrganizationsFullAccess", + "UpdateDate": "2018-11-06T20:31:57+00:00", + "VersionId": "v1" + }, + "AWSOrganizationsReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSOrganizationsReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-06T20:32:38+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "organizations:Describe*", + "organizations:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJY5RQATUV77PEPVOM", + "PolicyName": "AWSOrganizationsReadOnlyAccess", + "UpdateDate": "2018-11-06T20:32:38+00:00", + "VersionId": "v1" + }, + "AWSOrganizationsServiceTrustPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSOrganizationsServiceTrustPolicy", + "AttachmentCount": 0, + "CreateDate": "2017-10-10T23:04:07+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "iam:DeleteRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/organizations.amazonaws.com/*" + ], + "Sid": "AllowDeletionOfServiceLinkedRoleForOrganizations" + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "AllowCreationOfServiceLinkedRoles" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIQH6ROMVVECFVRJPK", + "PolicyName": "AWSOrganizationsServiceTrustPolicy", + "UpdateDate": "2017-11-01T06:01:18+00:00", + "VersionId": "v2" + }, + "AWSPriceListServiceFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSPriceListServiceFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-22T00:36:27+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "pricing:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIADJ4GBYNHKABML3Q", + "PolicyName": "AWSPriceListServiceFullAccess", + "UpdateDate": "2017-11-22T00:36:27+00:00", + "VersionId": "v1" + }, + "AWSPrivateMarketplaceAdminFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSPrivateMarketplaceAdminFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-27T16:32:32+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "aws-marketplace:CreatePrivateMarketplace", + "aws-marketplace:CreatePrivateMarketplaceProfile", + "aws-marketplace:UpdatePrivateMarketplaceProfile", + "aws-marketplace:StartPrivateMarketplace", + "aws-marketplace:StopPrivateMarketplace", + "aws-marketplace:AssociateProductsWithPrivateMarketplace", + "aws-marketplace:DisassociateProductsFromPrivateMarketplace", + "aws-marketplace:DescribePrivateMarketplaceProfile", + 
"aws-marketplace:DescribePrivateMarketplaceStatus", + "aws-marketplace:ListPrivateMarketplaceProducts", + "aws-marketplace:DescribePrivateMarketplaceProducts" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ6VRZDDCYDOVCOCEI", + "PolicyName": "AWSPrivateMarketplaceAdminFullAccess", + "UpdateDate": "2018-11-27T16:32:32+00:00", + "VersionId": "v1" + }, "AWSQuickSightDescribeRDS": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSQuickSightDescribeRDS", "AttachmentCount": 0, @@ -5019,6 +10819,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJU5J6OAMCJD3OO76O", "PolicyName": "AWSQuickSightDescribeRDS", "UpdateDate": "2015-11-10T23:24:50+00:00", @@ -5044,11 +10845,40 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJFEM6MLSLTW4ZNBW2", "PolicyName": "AWSQuickSightDescribeRedshift", "UpdateDate": "2015-11-10T23:25:01+00:00", "VersionId": "v1" }, + "AWSQuickSightIoTAnalyticsAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSQuickSightIoTAnalyticsAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T17:00:54+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iotanalytics:ListDatasets", + "iotanalytics:DescribeDataset", + "iotanalytics:GetDatasetContent" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJIZNDRUTKCN5HLZOE", + "PolicyName": "AWSQuickSightIoTAnalyticsAccess", + "UpdateDate": "2017-11-29T17:00:54+00:00", + "VersionId": "v1" + }, "AWSQuickSightListIAM": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSQuickSightListIAM", "AttachmentCount": 0, @@ -5069,6 +10899,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI3CH5UUWZN4EKGILO", "PolicyName": "AWSQuickSightListIAM", "UpdateDate": "2015-11-10T23:25:07+00:00", @@ -5077,8 +10908,8 @@ aws_managed_policies_data = """ "AWSQuicksightAthenaAccess": { "Arn": "arn:aws:iam::aws:policy/service-role/AWSQuicksightAthenaAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-11T23:37:32+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2016-12-09T02:31:03+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -5093,6 +10924,7 @@ aws_managed_policies_data = """ "athena:GetQueryExecution", "athena:GetQueryExecutions", "athena:GetQueryResults", + "athena:GetQueryResultsStream", "athena:GetTable", "athena:GetTables", "athena:ListQueryExecutions", @@ -5154,15 +10986,1124 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI4JB77JXFQXDWNRPM", "PolicyName": "AWSQuicksightAthenaAccess", - "UpdateDate": "2017-08-11T23:37:32+00:00", + "UpdateDate": "2018-08-07T20:24:55+00:00", + "VersionId": "v4" + }, + "AWSResourceAccessManagerServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSResourceAccessManagerServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-11-14T19:28:28+00:00", + 
"DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "organizations:DescribeAccount", + "organizations:DescribeOrganization", + "organizations:DescribeOrganizationalUnit", + "organizations:ListAccounts", + "organizations:ListAccountsForParent", + "organizations:ListChildren", + "organizations:ListOrganizationalUnitsForParent", + "organizations:ListParents", + "organizations:ListRoots" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:DeleteRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/ram.amazonaws.com/*" + ], + "Sid": "AllowDeletionOfServiceLinkedRoleForResourceAccessManager" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJU667A3V5UAXC4YNE", + "PolicyName": "AWSResourceAccessManagerServiceRolePolicy", + "UpdateDate": "2018-11-14T19:28:28+00:00", + "VersionId": "v1" + }, + "AWSResourceGroupsReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSResourceGroupsReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-03-07T10:27:04+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "resource-groups:Get*", + "resource-groups:List*", + "resource-groups:Search*", + "tag:Get*", + "cloudformation:DescribeStacks", + "cloudformation:ListStackResources", + "ec2:DescribeInstances", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSnapshots", + "ec2:DescribeVolumes", + "ec2:DescribeVpcs", + "elasticache:DescribeCacheClusters", + "elasticache:DescribeSnapshots", + "elasticache:ListTagsForResource", + "elasticbeanstalk:DescribeEnvironments", + "elasticmapreduce:DescribeCluster", + "elasticmapreduce:ListClusters", + "glacier:ListVaults", + "glacier:DescribeVault", + "glacier:ListTagsForVault", + "kinesis:ListStreams", + "kinesis:DescribeStream", + "kinesis:ListTagsForStream", + "opsworks:DescribeStacks", + "opsworks:ListTags", + "rds:DescribeDBInstances", + "rds:DescribeDBSnapshots", + "rds:ListTagsForResource", + "redshift:DescribeClusters", + "redshift:DescribeTags", + "route53domains:ListDomains", + "route53:ListHealthChecks", + "route53:GetHealthCheck", + "route53:ListHostedZones", + "route53:GetHostedZone", + "route53:ListTagsForResource", + "storagegateway:ListGateways", + "storagegateway:DescribeGatewayInformation", + "storagegateway:ListTagsForResource", + "s3:ListAllMyBuckets", + "s3:GetBucketTagging", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTags", + "ssm:ListDocuments" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIXFKM2WGBJAEWMFEG", + "PolicyName": "AWSResourceGroupsReadOnlyAccess", + "UpdateDate": "2019-02-05T17:56:25+00:00", + "VersionId": "v2" + }, + "AWSRoboMakerFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSRoboMakerFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-26T05:28:10+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:GetObject", + "robomaker:*" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "VisualEditor0" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "robomaker.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, 
+ "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIG7WQVUX3AGSKGBAO", + "PolicyName": "AWSRoboMakerFullAccess", + "UpdateDate": "2018-11-26T05:28:10+00:00", + "VersionId": "v1" + }, + "AWSRoboMakerReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSRoboMakerReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-26T05:30:50+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "robomaker:ListDeploymentJobs", + "robomaker:BatchDescribeSimulationJob", + "robomaker:DescribeFleet", + "robomaker:DescribeSimulationApplication", + "robomaker:DescribeRobotApplication", + "robomaker:ListFleets", + "robomaker:ListSimulationJobs", + "robomaker:DescribeDeploymentJob", + "robomaker:DescribeSimulationJob", + "robomaker:DescribeRobot", + "robomaker:ListRobots", + "robomaker:ListRobotApplications", + "robomaker:ListSimulationApplications" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "VisualEditor0" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIXFHP2ALXXGGECYJI", + "PolicyName": "AWSRoboMakerReadOnlyAccess", + "UpdateDate": "2018-11-26T05:30:50+00:00", + "VersionId": "v1" + }, + "AWSRoboMakerServicePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSRoboMakerServicePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-11-26T06:30:08+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterfacePermission", + "ec2:DescribeNetworkInterfaces", + "ec2:DeleteNetworkInterface", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeSecurityGroups", + "greengrass:CreateDeployment", + "greengrass:CreateGroupVersion", + "greengrass:CreateFunctionDefinition", + "greengrass:CreateFunctionDefinitionVersion", + "greengrass:GetDeploymentStatus", + "greengrass:GetGroup", + "greengrass:GetGroupVersion", + "greengrass:GetCoreDefinitionVersion", + "greengrass:GetFunctionDefinitionVersion", + "greengrass:GetAssociatedRole", + "lambda:CreateFunction" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "lambda:UpdateFunctionCode", + "lambda:GetFunction", + "lambda:UpdateFunctionConfiguration", + "lambda:DeleteFunction", + "lambda:ListVersionsByFunction", + "lambda:GetAlias", + "lambda:UpdateAlias", + "lambda:CreateAlias", + "lambda:DeleteAlias" + ], + "Effect": "Allow", + "Resource": "arn:aws:lambda:*:*:function:aws-robomaker-*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringEqualsIfExists": { + "iam:PassedToService": "lambda.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJYLVVUUQMAEEZ3ZNY", + "PolicyName": "AWSRoboMakerServicePolicy", + "UpdateDate": "2019-04-04T22:15:35+00:00", + "VersionId": "v2" + }, + "AWSRoboMakerServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/AWSRoboMakerServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-11-26T05:33:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterfacePermission", + "ec2:DescribeNetworkInterfaces", + "ec2:DeleteNetworkInterface", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeSecurityGroups", + "greengrass:CreateDeployment", + "greengrass:CreateGroupVersion", + 
"greengrass:CreateFunctionDefinition", + "greengrass:CreateFunctionDefinitionVersion", + "greengrass:GetDeploymentStatus", + "greengrass:GetGroup", + "greengrass:GetGroupVersion", + "greengrass:GetCoreDefinitionVersion", + "greengrass:GetFunctionDefinitionVersion", + "greengrass:GetAssociatedRole", + "lambda:CreateFunction" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "lambda:UpdateFunctionCode", + "lambda:GetFunction", + "lambda:UpdateFunctionConfiguration" + ], + "Effect": "Allow", + "Resource": "arn:aws:lambda:*:*:function:aws-robomaker-*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringEqualsIfExists": { + "iam:PassedToService": "lambda.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIOSFFLBBLCTKS3ATC", + "PolicyName": "AWSRoboMakerServiceRolePolicy", + "UpdateDate": "2018-11-26T05:33:19+00:00", + "VersionId": "v1" + }, + "AWSSSODirectoryAdministrator": { + "Arn": "arn:aws:iam::aws:policy/AWSSSODirectoryAdministrator", + "AttachmentCount": 0, + "CreateDate": "2018-10-31T23:54:00+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sso-directory:*" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "AWSSSODirectoryAdministrator" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI2TCZRD7WRD5D2E2Q", + "PolicyName": "AWSSSODirectoryAdministrator", + "UpdateDate": "2018-10-31T23:54:00+00:00", + "VersionId": "v1" + }, + "AWSSSODirectoryReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AWSSSODirectoryReadOnly", + "AttachmentCount": 0, + "CreateDate": "2018-10-31T23:49:32+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sso-directory:Search*", + "sso-directory:Describe*", + "sso-directory:List*" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "AWSSSODirectoryReadOnly" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJDPMQELJXZD2NC6JG", + "PolicyName": "AWSSSODirectoryReadOnly", + "UpdateDate": "2018-10-31T23:49:32+00:00", + "VersionId": "v1" + }, + "AWSSSOMasterAccountAdministrator": { + "Arn": "arn:aws:iam::aws:policy/AWSSSOMasterAccountAdministrator", + "AttachmentCount": 0, + "CreateDate": "2018-06-27T20:36:51+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:PassedToService": "sso.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/sso.amazonaws.com/AWSServiceRoleForSSO", + "Sid": "AWSSSOMasterAccountAdministrator" + }, + { + "Action": [ + "ds:DescribeTrusts", + "ds:UnauthorizeApplication", + "ds:DescribeDirectories", + "ds:AuthorizeApplication", + "iam:ListPolicies", + "organizations:EnableAWSServiceAccess", + "organizations:ListRoots", + "organizations:ListAccounts", + "organizations:ListOrganizationalUnitsForParent", + "organizations:ListAccountsForParent", + "organizations:DescribeOrganization", + "organizations:ListChildren", + "organizations:DescribeAccount", + "organizations:ListParents", + "sso:*", + "sso-directory:DescribeDirectory", + "ds:CreateAlias" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": 
"AWSSSOMemberAccountAdministrator" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIHXAQZIS3GOYIETUC", + "PolicyName": "AWSSSOMasterAccountAdministrator", + "UpdateDate": "2018-10-17T20:41:20+00:00", + "VersionId": "v3" + }, + "AWSSSOMemberAccountAdministrator": { + "Arn": "arn:aws:iam::aws:policy/AWSSSOMemberAccountAdministrator", + "AttachmentCount": 0, + "CreateDate": "2018-06-27T20:45:42+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ds:DescribeDirectories", + "ds:AuthorizeApplication", + "ds:UnauthorizeApplication", + "ds:DescribeTrusts", + "iam:ListPolicies", + "organizations:EnableAWSServiceAccess", + "organizations:DescribeOrganization", + "organizations:DescribeAccount", + "organizations:ListRoots", + "organizations:ListAccounts", + "organizations:ListAccountsForParent", + "organizations:ListParents", + "organizations:ListChildren", + "organizations:ListOrganizationalUnitsForParent", + "sso:*", + "sso-directory:DescribeDirectory", + "ds:CreateAlias" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "AWSSSOMemberAccountAdministrator" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIQYHEY7KJWXZFNDPY", + "PolicyName": "AWSSSOMemberAccountAdministrator", + "UpdateDate": "2018-10-17T20:35:52+00:00", + "VersionId": "v2" + }, + "AWSSSOReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AWSSSOReadOnly", + "AttachmentCount": 0, + "CreateDate": "2018-06-27T20:24:34+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ds:DescribeDirectories", + "ds:DescribeTrusts", + "iam:ListPolicies", + "organizations:DescribeOrganization", + "organizations:DescribeAccount", + "organizations:ListParents", + "organizations:ListChildren", + "organizations:ListAccounts", + "organizations:ListRoots", + "organizations:ListAccountsForParent", + "organizations:ListOrganizationalUnitsForParent", + "sso:DescribePermissionsPolicies", + "sso:GetApplicationTemplate", + "sso:GetApplicationInstance", + "sso:GetPermissionSet", + "sso:GetProfile", + "sso:GetPermissionsPolicy", + "sso:GetSSOStatus", + "sso:GetSSOConfiguration", + "sso:GetTrust", + "sso:ListPermissionSets", + "sso:ListDirectoryAssociations", + "sso:ListProfiles", + "sso:ListApplicationInstances", + "sso:ListApplicationInstanceCertificates", + "sso:ListApplicationTemplates", + "sso:ListApplications", + "sso:ListProfileAssociations", + "sso:Search*", + "sso-directory:DescribeDirectory" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "AWSSSOReadOnly" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJBSMEEZXFDMKMY43I", + "PolicyName": "AWSSSOReadOnly", + "UpdateDate": "2018-12-19T20:17:58+00:00", + "VersionId": "v3" + }, + "AWSSSOServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSSSOServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-12-05T18:36:15+00:00", + "DefaultVersionId": "v6", + "Document": { + "Statement": [ + { + "Action": [ + "iam:AttachRolePolicy", + "iam:CreateRole", + "iam:DeleteRole", + "iam:DeleteRolePolicy", + "iam:DetachRolePolicy", + "iam:GetRole", + "iam:ListRolePolicies", + "iam:PutRolePolicy", + "iam:ListAttachedRolePolicies", + "iam:UpdateRole" + ], + "Effect": "Allow", + 
"Resource": [ + "arn:aws:iam::*:role/aws-reserved/sso.amazonaws.com/*" + ] + }, + { + "Action": [ + "iam:ListRoles" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "ListRolesInTheAccount" + }, + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus", + "iam:DeleteRole", + "iam:GetRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/sso.amazonaws.com/AWSServiceRoleForSSO" + ], + "Sid": "AllowDeletionOfServiceLinkedRoleForSSO" + }, + { + "Action": [ + "iam:CreateSAMLProvider", + "iam:GetSAMLProvider", + "iam:UpdateSAMLProvider", + "iam:DeleteSAMLProvider" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:saml-provider/AWSSSO_*" + ] + }, + { + "Action": [ + "organizations:DescribeAccount", + "organizations:DescribeOrganization", + "organizations:ListAccounts" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ds:UnauthorizeApplication" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "AllowUnauthAppForDirectory" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIJ52KSWOD4GI54XP2", + "PolicyName": "AWSSSOServiceRolePolicy", + "UpdateDate": "2019-05-15T20:45:42+00:00", + "VersionId": "v6" + }, + "AWSSecurityHubFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSSecurityHubFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-27T23:54:34+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "securityhub:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "securityhub.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ4262VZCA4HPBZSO6", + "PolicyName": "AWSSecurityHubFullAccess", + "UpdateDate": "2018-11-27T23:54:34+00:00", + "VersionId": "v1" + }, + "AWSSecurityHubReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSSecurityHubReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T01:34:29+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "securityhub:Get*", + "securityhub:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIEBAQNOFUCLFJ3UHG", + "PolicyName": "AWSSecurityHubReadOnlyAccess", + "UpdateDate": "2018-11-28T01:34:29+00:00", + "VersionId": "v1" + }, + "AWSSecurityHubServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSSecurityHubServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-11-27T23:47:51+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudtrail:DescribeTrails", + "cloudtrail:GetTrailStatus", + "cloudtrail:GetEventSelectors", + "cloudwatch:DescribeAlarms", + "logs:DescribeMetricFilters", + "sns:ListSubscriptionsByTopic", + "config:DescribeConfigurationRecorders", + "config:DescribeConfigurationRecorderStatus", + "config:DescribeConfigRules", + "config:BatchGetResourceConfig" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "config:PutConfigRule", + "config:DeleteConfigRule", + 
"config:GetComplianceDetailsByConfigRule" + ], + "Effect": "Allow", + "Resource": "arn:aws:config:*:*:config-rule/aws-service-rule/*securityhub*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJQPCESDDYDLLSOGYO", + "PolicyName": "AWSSecurityHubServiceRolePolicy", + "UpdateDate": "2018-11-27T23:47:51+00:00", + "VersionId": "v1" + }, + "AWSServiceCatalogAdminFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSServiceCatalogAdminFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-02-15T17:19:40+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:DescribeStackEvents", + "cloudformation:DescribeStacks", + "cloudformation:SetStackPolicy", + "cloudformation:UpdateStack", + "cloudformation:CreateChangeSet", + "cloudformation:DescribeChangeSet", + "cloudformation:ExecuteChangeSet", + "cloudformation:ListChangeSets", + "cloudformation:DeleteChangeSet", + "cloudformation:ListStackResources", + "cloudformation:TagResource", + "cloudformation:CreateStackSet", + "cloudformation:CreateStackInstances", + "cloudformation:UpdateStackSet", + "cloudformation:UpdateStackInstances", + "cloudformation:DeleteStackSet", + "cloudformation:DeleteStackInstances", + "cloudformation:DescribeStackSet", + "cloudformation:DescribeStackInstance", + "cloudformation:DescribeStackSetOperation", + "cloudformation:ListStackInstances", + "cloudformation:ListStackSetOperations", + "cloudformation:ListStackSetOperationResults" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/SC-*", + "arn:aws:cloudformation:*:*:stack/StackSet-SC-*", + "arn:aws:cloudformation:*:*:changeSet/SC-*", + "arn:aws:cloudformation:*:*:stackset/SC-*" + ] + }, + { + "Action": [ + "cloudformation:CreateUploadBucket", + "cloudformation:GetTemplateSummary", + "cloudformation:ValidateTemplate", + "iam:GetGroup", + "iam:GetRole", + "iam:GetUser", + "iam:ListGroups", + "iam:ListRoles", + "iam:ListUsers", + "servicecatalog:*", + "ssm:DescribeDocument", + "ssm:GetAutomationExecution", + "ssm:ListDocuments", + "ssm:ListDocumentVersions", + "config:DescribeConfigurationRecorders", + "config:DescribeConfigurationRecorderStatus" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringEquals": { + "iam:PassedToService": "servicecatalog.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJWLJU4BZ7AQUJSBVM", + "PolicyName": "AWSServiceCatalogAdminFullAccess", + "UpdateDate": "2019-02-06T01:57:54+00:00", + "VersionId": "v5" + }, + "AWSServiceCatalogEndUserFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSServiceCatalogEndUserFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-02-15T17:22:32+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:DescribeStackEvents", + "cloudformation:DescribeStacks", + "cloudformation:SetStackPolicy", + "cloudformation:ValidateTemplate", + "cloudformation:UpdateStack", + "cloudformation:CreateChangeSet", + "cloudformation:DescribeChangeSet", + "cloudformation:ExecuteChangeSet", + "cloudformation:ListChangeSets", + 
"cloudformation:DeleteChangeSet", + "cloudformation:TagResource", + "cloudformation:CreateStackSet", + "cloudformation:CreateStackInstances", + "cloudformation:UpdateStackSet", + "cloudformation:UpdateStackInstances", + "cloudformation:DeleteStackSet", + "cloudformation:DeleteStackInstances", + "cloudformation:DescribeStackSet", + "cloudformation:DescribeStackInstance", + "cloudformation:DescribeStackSetOperation", + "cloudformation:ListStackInstances", + "cloudformation:ListStackResources", + "cloudformation:ListStackSetOperations", + "cloudformation:ListStackSetOperationResults" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/SC-*", + "arn:aws:cloudformation:*:*:stack/StackSet-SC-*", + "arn:aws:cloudformation:*:*:changeSet/SC-*", + "arn:aws:cloudformation:*:*:stackset/SC-*" + ] + }, + { + "Action": [ + "cloudformation:GetTemplateSummary", + "servicecatalog:DescribeProduct", + "servicecatalog:DescribeProductView", + "servicecatalog:DescribeProvisioningParameters", + "servicecatalog:ListLaunchPaths", + "servicecatalog:ProvisionProduct", + "servicecatalog:SearchProducts", + "ssm:DescribeDocument", + "ssm:GetAutomationExecution", + "config:DescribeConfigurationRecorders", + "config:DescribeConfigurationRecorderStatus" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "servicecatalog:DescribeProvisionedProduct", + "servicecatalog:DescribeRecord", + "servicecatalog:ListRecordHistory", + "servicecatalog:ScanProvisionedProducts", + "servicecatalog:TerminateProvisionedProduct", + "servicecatalog:UpdateProvisionedProduct", + "servicecatalog:SearchProvisionedProducts", + "servicecatalog:CreateProvisionedProductPlan", + "servicecatalog:DescribeProvisionedProductPlan", + "servicecatalog:ExecuteProvisionedProductPlan", + "servicecatalog:DeleteProvisionedProductPlan", + "servicecatalog:ListProvisionedProductPlans", + "servicecatalog:ListServiceActionsForProvisioningArtifact", + "servicecatalog:ExecuteProvisionedProductServiceAction" + ], + "Condition": { + "StringEquals": { + "servicecatalog:userLevel": "self" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJTLLC4DGDMTZB54M4", + "PolicyName": "AWSServiceCatalogEndUserFullAccess", + "UpdateDate": "2019-02-06T02:00:22+00:00", + "VersionId": "v5" + }, + "AWSServiceRoleForEC2ScheduledInstances": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSServiceRoleForEC2ScheduledInstances", + "AttachmentCount": 0, + "CreateDate": "2017-10-12T18:31:55+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateTags" + ], + "Condition": { + "ForAllValues:StringEquals": { + "aws:TagKeys": [ + "aws:ec2sri:scheduledInstanceId" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:instance/*" + ] + }, + { + "Action": [ + "ec2:TerminateInstances" + ], + "Condition": { + "StringLike": { + "ec2:ResourceTag/aws:ec2sri:scheduledInstanceId": "*" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ7Y4TT63D6QBKCY4O", + "PolicyName": "AWSServiceRoleForEC2ScheduledInstances", + "UpdateDate": "2017-10-12T18:31:55+00:00", + "VersionId": "v1" + }, + "AWSServiceRoleForIoTSiteWise": { + "Arn": 
"arn:aws:iam::aws:policy/aws-service-role/AWSServiceRoleForIoTSiteWise", + "AttachmentCount": 0, + "CreateDate": "2018-11-14T19:19:17+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": "iotanalytics:ExecuteQuery", + "Effect": "Allow", + "Resource": "arn:aws:iotanalytics:*:*:datastore-index/*" + }, + { + "Action": [ + "greengrass:CreateCoreDefinitionVersion", + "greengrass:CreateDeployment", + "greengrass:CreateFunctionDefinition", + "greengrass:CreateFunctionDefinitionVersion", + "greengrass:CreateGroupVersion", + "greengrass:CreateLoggerDefinition", + "greengrass:CreateLoggerDefinitionVersion", + "greengrass:CreateResourceDefinition", + "greengrass:CreateResourceDefinitionVersion", + "greengrass:GetAssociatedRole", + "greengrass:GetCoreDefinition", + "greengrass:GetCoreDefinitionVersion", + "greengrass:GetDeploymentStatus", + "greengrass:GetFunctionDefinition", + "greengrass:GetFunctionDefinitionVersion", + "greengrass:GetGroup", + "greengrass:GetGroupVersion", + "greengrass:GetLoggerDefinition", + "greengrass:GetLoggerDefinitionVersion", + "greengrass:GetResourceDefinition", + "greengrass:GetResourceDefinitionVersion", + "greengrass:ListCoreDefinitions", + "greengrass:UpdateCoreDefinition", + "greengrass:UpdateFunctionDefinition", + "greengrass:UpdateLoggerDefinition", + "greengrass:UpdateResourceDefinition" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "lambda:CreateAlias", + "lambda:CreateFunction", + "lambda:GetFunction", + "lambda:ListVersionsByFunction", + "lambda:UpdateFunctionCode", + "lambda:PublishVersion", + "lambda:UpdateAlias" + ], + "Effect": "Allow", + "Resource": "arn:aws:lambda:*:*:function:AWSIoTSiteWise*" + }, + { + "Action": [ + "iot:GetThingShadow", + "iot:UpdateThingShadow" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLikeIfExists": { + "iam:PassedToService": "lambda.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJGQU4DZIQP6HLYQPE", + "PolicyName": "AWSServiceRoleForIoTSiteWise", + "UpdateDate": "2019-02-11T20:49:09+00:00", + "VersionId": "v3" + }, + "AWSShieldDRTAccessPolicy": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy", + "AttachmentCount": 0, + "CreateDate": "2018-06-05T22:29:39+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "cloudfront:List*", + "elasticloadbalancing:List*", + "route53:List*", + "cloudfront:Describe*", + "elasticloadbalancing:Describe*", + "route53:Describe*", + "cloudwatch:Describe*", + "cloudwatch:Get*", + "cloudwatch:List*", + "cloudfront:GetDistribution*", + "globalaccelerator:ListAccelerators", + "globalaccelerator:DescribeAccelerator" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:*", + "arn:aws:cloudfront::*:*", + "arn:aws:route53:::hostedzone/*", + "arn:aws:cloudwatch:*:*:*:*", + "arn:aws:globalaccelerator::*:*" + ], + "Sid": "DRTAccessProtectedResources" + }, + { + "Action": [ + "waf:*", + "waf-regional:*" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:waf:*", + "arn:aws:waf-regional:*" + ], + "Sid": "DRTManageMitigations" + }, + { + "Action": [ + "shield:*" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "DRTManageProtections" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": 
true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJWNCSZ4PARLO37VVY", + "PolicyName": "AWSShieldDRTAccessPolicy", + "UpdateDate": "2019-02-11T17:08:57+00:00", "VersionId": "v3" }, "AWSStepFunctionsConsoleFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSStepFunctionsConsoleFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-01-12T00:19:34+00:00", + "CreateDate": "2017-01-11T21:54:31+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -5192,6 +12133,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJIYC52YWRX6OSMJWK", "PolicyName": "AWSStepFunctionsConsoleFullAccess", "UpdateDate": "2017-01-12T00:19:34+00:00", @@ -5215,6 +12157,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJXKA6VP3UFBVHDPPA", "PolicyName": "AWSStepFunctionsFullAccess", "UpdateDate": "2017-01-11T21:51:32+00:00", @@ -5224,7 +12167,7 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AWSStepFunctionsReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2017-01-11T21:46:19+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -5232,6 +12175,7 @@ aws_managed_policies_data = """ "states:ListStateMachines", "states:ListActivities", "states:DescribeStateMachine", + "states:DescribeStateMachineForExecution", "states:ListExecutions", "states:DescribeExecution", "states:GetExecutionHistory", @@ -5246,10 +12190,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJONHB2TJQDJPFW5TM", "PolicyName": "AWSStepFunctionsReadOnlyAccess", - "UpdateDate": "2017-01-11T21:46:19+00:00", - "VersionId": "v1" + "UpdateDate": "2017-11-10T22:03:49+00:00", + "VersionId": "v2" }, "AWSStorageGatewayFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSStorageGatewayFullAccess", @@ -5279,6 +12224,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJG5SSPAVOGK3SIDGU", "PolicyName": "AWSStorageGatewayFullAccess", "UpdateDate": "2015-02-06T18:41:09+00:00", @@ -5312,6 +12258,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIFKCTUVOPD5NICXJK", "PolicyName": "AWSStorageGatewayReadOnlyAccess", "UpdateDate": "2015-02-06T18:41:10+00:00", @@ -5337,15 +12284,1388 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJSNKQX2OW67GF4S7E", "PolicyName": "AWSSupportAccess", "UpdateDate": "2015-02-06T18:41:11+00:00", "VersionId": "v1" }, + "AWSSupportServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSSupportServiceRolePolicy", + "AttachmentCount": 1, + "CreateDate": "2018-04-19T18:04:44+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "apigateway:GET" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:apigateway:*::/account", + "arn:aws:apigateway:*::/clientcertificates", + "arn:aws:apigateway:*::/clientcertificates/*", + "arn:aws:apigateway:*::/domainnames", + "arn:aws:apigateway:*::/domainnames/*", + "arn:aws:apigateway:*::/domainnames/*/basepathmappings", + 
"arn:aws:apigateway:*::/domainnames/*/basepathmappings/*", + "arn:aws:apigateway:*::/restapis", + "arn:aws:apigateway:*::/restapis/*", + "arn:aws:apigateway:*::/restapis/*/authorizers", + "arn:aws:apigateway:*::/restapis/*/authorizers/*", + "arn:aws:apigateway:*::/restapis/*/deployments", + "arn:aws:apigateway:*::/restapis/*/deployments/*", + "arn:aws:apigateway:*::/restapis/*/models", + "arn:aws:apigateway:*::/restapis/*/models/*", + "arn:aws:apigateway:*::/restapis/*/models/*/default_template", + "arn:aws:apigateway:*::/restapis/*/resources", + "arn:aws:apigateway:*::/restapis/*/resources/*", + "arn:aws:apigateway:*::/restapis/*/resources/*/methods/*/integration/responses/*", + "arn:aws:apigateway:*::/restapis/*/resources/*/methods/*/responses/*", + "arn:aws:apigateway:*::/restapis/*/stages/*/sdks/*", + "arn:aws:apigateway:*::/restapis/*/resources/*/methods/*", + "arn:aws:apigateway:*::/restapis/*/resources/*/methods/*/integration", + "arn:aws:apigateway:*::/restapis/*/stages", + "arn:aws:apigateway:*::/restapis/*/stages/*" + ] + }, + { + "Action": [ + "iam:DeleteRole" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/support.amazonaws.com/AWSServiceRoleForSupport" + ] + }, + { + "Action": [ + "a4b:getDevice", + "a4b:getProfile", + "a4b:getRoom", + "a4b:getRoomSkillParameter", + "a4b:getSkillGroup", + "a4b:searchDevices", + "a4b:searchProfiles", + "a4b:searchRooms", + "a4b:searchSkillGroups", + "acm-pca:describeCertificateAuthority", + "acm-pca:describeCertificateAuthorityAuditReport", + "acm-pca:getCertificate", + "acm-pca:getCertificateAuthorityCertificate", + "acm-pca:getCertificateAuthorityCsr", + "acm-pca:listCertificateAuthorities", + "acm-pca:listTags", + "acm:describeCertificate", + "acm:getCertificate", + "acm:listCertificates", + "acm:listTagsForCertificate", + "application-autoscaling:describeScalableTargets", + "application-autoscaling:describeScalingActivities", + "application-autoscaling:describeScalingPolicies", + "appstream:describeDirectoryConfigs", + "appstream:describeFleets", + "appstream:describeImageBuilders", + "appstream:describeImages", + "appstream:describeSessions", + "appstream:describeStacks", + "appstream:listAssociatedFleets", + "appstream:listAssociatedStacks", + "appstream:listTagsForResource", + "appsync:getFunction", + "appsync:getGraphqlApi", + "appsync:getIntrospectionSchema", + "appsync:getResolver", + "appsync:getSchemaCreationStatus", + "appsync:getType", + "appsync:listDataSources", + "appsync:listFunctions", + "appsync:listGraphqlApis", + "appsync:listResolvers", + "appsync:listTypes", + "athena:batchGetNamedQuery", + "athena:batchGetQueryExecution", + "athena:getNamedQuery", + "athena:getQueryExecution", + "athena:listNamedQueries", + "athena:listQueryExecutions", + "autoscaling-plans:describeScalingPlanResources", + "autoscaling-plans:describeScalingPlans", + "autoscaling-plans:getScalingPlanResourceForecastData", + "autoscaling:describeAccountLimits", + "autoscaling:describeAdjustmentTypes", + "autoscaling:describeAutoScalingGroups", + "autoscaling:describeAutoScalingInstances", + "autoscaling:describeAutoScalingNotificationTypes", + "autoscaling:describeLaunchConfigurations", + "autoscaling:describeLifecycleHooks", + "autoscaling:describeLifecycleHookTypes", + "autoscaling:describeLoadBalancers", + "autoscaling:describeLoadBalancerTargetGroups", + "autoscaling:describeMetricCollectionTypes", + "autoscaling:describeNotificationConfigurations", + "autoscaling:describePolicies", + 
"autoscaling:describeScalingActivities", + "autoscaling:describeScalingProcessTypes", + "autoscaling:describeScheduledActions", + "autoscaling:describeTags", + "autoscaling:describeTerminationPolicyTypes", + "batch:describeComputeEnvironments", + "batch:describeJobDefinitions", + "batch:describeJobQueues", + "batch:describeJobs", + "batch:listJobs", + "cloud9:describeEnvironmentMemberships", + "cloud9:describeEnvironments", + "cloud9:listEnvironments", + "clouddirectory:getDirectory", + "clouddirectory:listDirectories", + "cloudformation:describeAccountLimits", + "cloudformation:describeChangeSet", + "cloudformation:describeStackEvents", + "cloudformation:describeStackInstance", + "cloudformation:describeStackResource", + "cloudformation:describeStackResources", + "cloudformation:describeStacks", + "cloudformation:describeStackSet", + "cloudformation:describeStackSetOperation", + "cloudformation:estimateTemplateCost", + "cloudformation:getStackPolicy", + "cloudformation:getTemplate", + "cloudformation:getTemplateSummary", + "cloudformation:listChangeSets", + "cloudformation:listExports", + "cloudformation:listImports", + "cloudformation:listStackInstances", + "cloudformation:listStackResources", + "cloudformation:listStacks", + "cloudformation:listStackSetOperationResults", + "cloudformation:listStackSetOperations", + "cloudformation:listStackSets", + "cloudfront:getCloudFrontOriginAccessIdentity", + "cloudfront:getCloudFrontOriginAccessIdentityConfig", + "cloudfront:getDistribution", + "cloudfront:getDistributionConfig", + "cloudfront:getInvalidation", + "cloudfront:getStreamingDistribution", + "cloudfront:getStreamingDistributionConfig", + "cloudfront:listCloudFrontOriginAccessIdentities", + "cloudfront:listDistributions", + "cloudfront:listDistributionsByWebACLId", + "cloudfront:listInvalidations", + "cloudfront:listStreamingDistributions", + "cloudhsm:describeBackups", + "cloudhsm:describeClusters", + "cloudsearch:describeAnalysisSchemes", + "cloudsearch:describeAvailabilityOptions", + "cloudsearch:describeDomains", + "cloudsearch:describeExpressions", + "cloudsearch:describeIndexFields", + "cloudsearch:describeScalingParameters", + "cloudsearch:describeServiceAccessPolicies", + "cloudsearch:describeSuggesters", + "cloudsearch:listDomainNames", + "cloudtrail:describeTrails", + "cloudtrail:getEventSelectors", + "cloudtrail:getTrailStatus", + "cloudtrail:listPublicKeys", + "cloudtrail:listTags", + "cloudtrail:lookupEvents", + "cloudwatch:describeAlarmHistory", + "cloudwatch:describeAlarms", + "cloudwatch:describeAlarmsForMetric", + "cloudwatch:getDashboard", + "cloudwatch:getMetricData", + "cloudwatch:getMetricStatistics", + "cloudwatch:listDashboards", + "cloudwatch:listMetrics", + "codebuild:batchGetBuilds", + "codebuild:batchGetProjects", + "codebuild:listBuilds", + "codebuild:listBuildsForProject", + "codebuild:listCuratedEnvironmentImages", + "codebuild:listProjects", + "codecommit:batchGetRepositories", + "codecommit:getBranch", + "codecommit:getRepository", + "codecommit:getRepositoryTriggers", + "codecommit:listBranches", + "codecommit:listRepositories", + "codedeploy:batchGetApplications", + "codedeploy:batchGetDeployments", + "codedeploy:batchGetOnPremisesInstances", + "codedeploy:getApplication", + "codedeploy:getApplicationRevision", + "codedeploy:getDeployment", + "codedeploy:getDeploymentConfig", + "codedeploy:getDeploymentGroup", + "codedeploy:getDeploymentInstance", + "codedeploy:getOnPremisesInstance", + "codedeploy:listApplicationRevisions", + 
"codedeploy:listApplications", + "codedeploy:listDeploymentConfigs", + "codedeploy:listDeploymentGroups", + "codedeploy:listDeploymentInstances", + "codedeploy:listDeployments", + "codedeploy:listOnPremisesInstances", + "codepipeline:getJobDetails", + "codepipeline:getPipeline", + "codepipeline:getPipelineExecution", + "codepipeline:getPipelineState", + "codepipeline:listActionTypes", + "codepipeline:listPipelines", + "codestar:describeProject", + "codestar:listProjects", + "codestar:listResources", + "codestar:listTeamMembers", + "codestar:listUserProfiles", + "cognito-identity:describeIdentityPool", + "cognito-identity:getIdentityPoolRoles", + "cognito-identity:listIdentities", + "cognito-identity:listIdentityPools", + "cognito-idp:adminGetUser", + "cognito-idp:describeIdentityProvider", + "cognito-idp:describeResourceServer", + "cognito-idp:describeRiskConfiguration", + "cognito-idp:describeUserImportJob", + "cognito-idp:describeUserPool", + "cognito-idp:describeUserPoolClient", + "cognito-idp:describeUserPoolDomain", + "cognito-idp:getGroup", + "cognito-idp:getUICustomization", + "cognito-idp:getUser", + "cognito-idp:getUserPoolMfaConfig", + "cognito-idp:listGroups", + "cognito-idp:listIdentityProviders", + "cognito-idp:listResourceServers", + "cognito-idp:listUserImportJobs", + "cognito-idp:listUserPoolClients", + "cognito-idp:listUserPools", + "cognito-sync:describeDataset", + "cognito-sync:describeIdentityPoolUsage", + "cognito-sync:describeIdentityUsage", + "cognito-sync:getCognitoEvents", + "cognito-sync:getIdentityPoolConfiguration", + "cognito-sync:listDatasets", + "cognito-sync:listIdentityPoolUsage", + "config:describeConfigRuleEvaluationStatus", + "config:describeConfigRules", + "config:describeConfigurationRecorders", + "config:describeConfigurationRecorderStatus", + "config:describeDeliveryChannels", + "config:describeDeliveryChannelStatus", + "config:getResourceConfigHistory", + "config:listDiscoveredResources", + "datapipeline:describeObjects", + "datapipeline:describePipelines", + "datapipeline:getPipelineDefinition", + "datapipeline:listPipelines", + "datapipeline:queryObjects", + "dax:describeClusters", + "dax:describeDefaultParameters", + "dax:describeEvents", + "dax:describeParameterGroups", + "dax:describeParameters", + "dax:describeSubnetGroups", + "devicefarm:getAccountSettings", + "devicefarm:getDevice", + "devicefarm:getDevicePool", + "devicefarm:getDevicePoolCompatibility", + "devicefarm:getJob", + "devicefarm:getProject", + "devicefarm:getRemoteAccessSession", + "devicefarm:getRun", + "devicefarm:getSuite", + "devicefarm:getTest", + "devicefarm:getUpload", + "devicefarm:listArtifacts", + "devicefarm:listDevicePools", + "devicefarm:listDevices", + "devicefarm:listJobs", + "devicefarm:listProjects", + "devicefarm:listRemoteAccessSessions", + "devicefarm:listRuns", + "devicefarm:listSamples", + "devicefarm:listSuites", + "devicefarm:listTests", + "devicefarm:listUniqueProblems", + "devicefarm:listUploads", + "directconnect:describeConnections", + "directconnect:describeConnectionsOnInterconnect", + "directconnect:describeInterconnects", + "directconnect:describeLocations", + "directconnect:describeVirtualGateways", + "directconnect:describeVirtualInterfaces", + "dlm:getLifecyclePolicies", + "dlm:getLifecyclePolicy", + "dms:describeAccountAttributes", + "dms:describeConnections", + "dms:describeEndpoints", + "dms:describeEndpointTypes", + "dms:describeOrderableReplicationInstances", + "dms:describeRefreshSchemasStatus", + "dms:describeReplicationInstances", + 
"dms:describeReplicationSubnetGroups", + "ds:describeConditionalForwarders", + "ds:describeDirectories", + "ds:describeEventTopics", + "ds:describeSnapshots", + "ds:describeTrusts", + "ds:getDirectoryLimits", + "ds:getSnapshotLimits", + "ds:listIpRoutes", + "ds:listSchemaExtensions", + "ds:listTagsForResource", + "dynamodb:describeBackup", + "dynamodb:describeContinuousBackups", + "dynamodb:describeGlobalTable", + "dynamodb:describeLimits", + "dynamodb:describeStream", + "dynamodb:describeTable", + "dynamodb:describeTimeToLive", + "dynamodb:listBackups", + "dynamodb:listGlobalTables", + "dynamodb:listStreams", + "dynamodb:listTables", + "dynamodb:listTagsOfResource", + "ec2:acceptReservedInstancesExchangeQuote", + "ec2:cancelReservedInstancesListing", + "ec2:createReservedInstancesListing", + "ec2:describeAccountAttributes", + "ec2:describeAddresses", + "ec2:describeAvailabilityZones", + "ec2:describeBundleTasks", + "ec2:describeByoipCidrs", + "ec2:describeClassicLinkInstances", + "ec2:describeConversionTasks", + "ec2:describeCustomerGateways", + "ec2:describeDhcpOptions", + "ec2:describeElasticGpus", + "ec2:describeExportTasks", + "ec2:describeFlowLogs", + "ec2:describeHostReservationOfferings", + "ec2:describeHostReservations", + "ec2:describeHosts", + "ec2:describeIdentityIdFormat", + "ec2:describeIdFormat", + "ec2:describeImageAttribute", + "ec2:describeImages", + "ec2:describeImportImageTasks", + "ec2:describeImportSnapshotTasks", + "ec2:describeInstanceAttribute", + "ec2:describeInstances", + "ec2:describeInstanceStatus", + "ec2:describeInternetGateways", + "ec2:describeKeyPairs", + "ec2:describeLaunchTemplates", + "ec2:describeLaunchTemplateVersions", + "ec2:describeMovingAddresses", + "ec2:describeNatGateways", + "ec2:describeNetworkAcls", + "ec2:describeNetworkInterfaceAttribute", + "ec2:describeNetworkInterfaces", + "ec2:describePlacementGroups", + "ec2:describePrefixLists", + "ec2:describePublicIpv4Pools", + "ec2:describeRegions", + "ec2:describeReservedInstances", + "ec2:describeReservedInstancesListings", + "ec2:describeReservedInstancesModifications", + "ec2:describeReservedInstancesOfferings", + "ec2:describeRouteTables", + "ec2:describeScheduledInstances", + "ec2:describeSecurityGroups", + "ec2:describeSnapshotAttribute", + "ec2:describeSnapshots", + "ec2:describeSpotDatafeedSubscription", + "ec2:describeSpotFleetInstances", + "ec2:describeSpotFleetRequestHistory", + "ec2:describeSpotFleetRequests", + "ec2:describeSpotInstanceRequests", + "ec2:describeSpotPriceHistory", + "ec2:describeSubnets", + "ec2:describeTags", + "ec2:describeVolumeAttribute", + "ec2:describeVolumes", + "ec2:describeVolumesModifications", + "ec2:describeVolumeStatus", + "ec2:describeVpcAttribute", + "ec2:describeVpcClassicLink", + "ec2:describeVpcClassicLinkDnsSupport", + "ec2:describeVpcEndpointConnectionNotifications", + "ec2:describeVpcEndpointConnections", + "ec2:describeVpcEndpoints", + "ec2:describeVpcEndpointServiceConfigurations", + "ec2:describeVpcEndpointServicePermissions", + "ec2:describeVpcEndpointServices", + "ec2:describeVpcPeeringConnections", + "ec2:describeVpcs", + "ec2:describeVpnConnections", + "ec2:describeVpnGateways", + "ec2:getConsoleScreenshot", + "ec2:getReservedInstancesExchangeQuote", + "ec2:modifyReservedInstances", + "ec2:purchaseReservedInstancesOffering", + "ecr:batchCheckLayerAvailability", + "ecr:describeImages", + "ecr:describeRepositories", + "ecr:getRepositoryPolicy", + "ecr:listImages", + "ecs:describeClusters", + "ecs:describeContainerInstances", + 
"ecs:describeServices", + "ecs:describeTaskDefinition", + "ecs:describeTasks", + "ecs:listClusters", + "ecs:listContainerInstances", + "ecs:listServices", + "ecs:listTaskDefinitions", + "ecs:listTasks", + "eks:describeCluster", + "eks:listClusters", + "elasticache:describeCacheClusters", + "elasticache:describeCacheEngineVersions", + "elasticache:describeCacheParameterGroups", + "elasticache:describeCacheParameters", + "elasticache:describeCacheSecurityGroups", + "elasticache:describeCacheSubnetGroups", + "elasticache:describeEngineDefaultParameters", + "elasticache:describeEvents", + "elasticache:describeReplicationGroups", + "elasticache:describeReservedCacheNodes", + "elasticache:describeReservedCacheNodesOfferings", + "elasticache:describeSnapshots", + "elasticache:listAllowedNodeTypeModifications", + "elasticache:listTagsForResource", + "elasticbeanstalk:checkDNSAvailability", + "elasticbeanstalk:describeApplications", + "elasticbeanstalk:describeApplicationVersions", + "elasticbeanstalk:describeConfigurationOptions", + "elasticbeanstalk:describeConfigurationSettings", + "elasticbeanstalk:describeEnvironmentHealth", + "elasticbeanstalk:describeEnvironmentManagedActionHistory", + "elasticbeanstalk:describeEnvironmentManagedActions", + "elasticbeanstalk:describeEnvironmentResources", + "elasticbeanstalk:describeEnvironments", + "elasticbeanstalk:describeEvents", + "elasticbeanstalk:describeInstancesHealth", + "elasticbeanstalk:describePlatformVersion", + "elasticbeanstalk:listAvailableSolutionStacks", + "elasticbeanstalk:listPlatformVersions", + "elasticbeanstalk:validateConfigurationSettings", + "elasticfilesystem:describeFileSystems", + "elasticfilesystem:describeMountTargets", + "elasticfilesystem:describeMountTargetSecurityGroups", + "elasticfilesystem:describeTags", + "elasticloadbalancing:describeInstanceHealth", + "elasticloadbalancing:describeListenerCertificates", + "elasticloadbalancing:describeListeners", + "elasticloadbalancing:describeLoadBalancerAttributes", + "elasticloadbalancing:describeLoadBalancerPolicies", + "elasticloadbalancing:describeLoadBalancerPolicyTypes", + "elasticloadbalancing:describeLoadBalancers", + "elasticloadbalancing:describeRules", + "elasticloadbalancing:describeSSLPolicies", + "elasticloadbalancing:describeTags", + "elasticloadbalancing:describeTargetGroupAttributes", + "elasticloadbalancing:describeTargetGroups", + "elasticloadbalancing:describeTargetHealth", + "elasticmapreduce:describeCluster", + "elasticmapreduce:describeSecurityConfiguration", + "elasticmapreduce:describeStep", + "elasticmapreduce:listBootstrapActions", + "elasticmapreduce:listClusters", + "elasticmapreduce:listInstanceGroups", + "elasticmapreduce:listInstances", + "elasticmapreduce:listSecurityConfigurations", + "elasticmapreduce:listSteps", + "elastictranscoder:listJobsByPipeline", + "elastictranscoder:listJobsByStatus", + "elastictranscoder:listPipelines", + "elastictranscoder:listPresets", + "elastictranscoder:readPipeline", + "elastictranscoder:readPreset", + "es:describeElasticsearchDomain", + "es:describeElasticsearchDomainConfig", + "es:describeElasticsearchDomains", + "es:listDomainNames", + "es:listTags", + "events:describeEventBus", + "events:describeRule", + "events:listRuleNamesByTarget", + "events:listRules", + "events:listTargetsByRule", + "events:testEventPattern", + "firehose:describeDeliveryStream", + "firehose:listDeliveryStreams", + "glacier:describeJob", + "glacier:describeVault", + "glacier:getDataRetrievalPolicy", + "glacier:getVaultAccessPolicy", + 
"glacier:getVaultLock", + "glacier:getVaultNotifications", + "glacier:listJobs", + "glacier:listTagsForVault", + "glacier:listVaults", + "glue:batchGetPartition", + "glue:getCatalogImportStatus", + "glue:getClassifier", + "glue:getClassifiers", + "glue:getCrawler", + "glue:getCrawlerMetrics", + "glue:getCrawlers", + "glue:getDatabase", + "glue:getDatabases", + "glue:getDataflowGraph", + "glue:getDevEndpoint", + "glue:getDevEndpoints", + "glue:getJob", + "glue:getJobRun", + "glue:getJobRuns", + "glue:getJobs", + "glue:getMapping", + "glue:getPartition", + "glue:getPartitions", + "glue:getTable", + "glue:getTables", + "glue:getTableVersions", + "glue:getTrigger", + "glue:getTriggers", + "glue:getUserDefinedFunction", + "glue:getUserDefinedFunctions", + "greengrass:getConnectivityInfo", + "greengrass:getCoreDefinition", + "greengrass:getCoreDefinitionVersion", + "greengrass:getDeploymentStatus", + "greengrass:getDeviceDefinition", + "greengrass:getDeviceDefinitionVersion", + "greengrass:getFunctionDefinition", + "greengrass:getFunctionDefinitionVersion", + "greengrass:getGroup", + "greengrass:getGroupCertificateAuthority", + "greengrass:getGroupVersion", + "greengrass:getLoggerDefinition", + "greengrass:getLoggerDefinitionVersion", + "greengrass:getResourceDefinitionVersion", + "greengrass:getServiceRoleForAccount", + "greengrass:getSubscriptionDefinition", + "greengrass:getSubscriptionDefinitionVersion", + "greengrass:listCoreDefinitions", + "greengrass:listCoreDefinitionVersions", + "greengrass:listDeployments", + "greengrass:listDeviceDefinitions", + "greengrass:listDeviceDefinitionVersions", + "greengrass:listFunctionDefinitions", + "greengrass:listFunctionDefinitionVersions", + "greengrass:listGroups", + "greengrass:listGroupVersions", + "greengrass:listLoggerDefinitions", + "greengrass:listLoggerDefinitionVersions", + "greengrass:listResourceDefinitions", + "greengrass:listResourceDefinitionVersions", + "greengrass:listSubscriptionDefinitions", + "greengrass:listSubscriptionDefinitionVersions", + "guardduty:getDetector", + "guardduty:getFindings", + "guardduty:getFindingsStatistics", + "guardduty:getInvitationsCount", + "guardduty:getIPSet", + "guardduty:getMasterAccount", + "guardduty:getMembers", + "guardduty:getThreatIntelSet", + "guardduty:listDetectors", + "guardduty:listFindings", + "guardduty:listInvitations", + "guardduty:listIPSets", + "guardduty:listMembers", + "guardduty:listThreatIntelSets", + "health:describeAffectedEntities", + "health:describeEntityAggregates", + "health:describeEventAggregates", + "health:describeEventDetails", + "health:describeEvents", + "health:describeEventTypes", + "iam:getAccessKeyLastUsed", + "iam:getAccountAuthorizationDetails", + "iam:getAccountPasswordPolicy", + "iam:getAccountSummary", + "iam:getContextKeysForCustomPolicy", + "iam:getContextKeysForPrincipalPolicy", + "iam:getCredentialReport", + "iam:getGroup", + "iam:getGroupPolicy", + "iam:getInstanceProfile", + "iam:getLoginProfile", + "iam:getOpenIDConnectProvider", + "iam:getPolicy", + "iam:getPolicyVersion", + "iam:getRole", + "iam:getRolePolicy", + "iam:getSAMLProvider", + "iam:getServerCertificate", + "iam:getSSHPublicKey", + "iam:getUser", + "iam:getUserPolicy", + "iam:listAccessKeys", + "iam:listAccountAliases", + "iam:listAttachedGroupPolicies", + "iam:listAttachedRolePolicies", + "iam:listAttachedUserPolicies", + "iam:listEntitiesForPolicy", + "iam:listGroupPolicies", + "iam:listGroups", + "iam:listGroupsForUser", + "iam:listInstanceProfiles", + "iam:listInstanceProfilesForRole", + 
"iam:listMFADevices", + "iam:listOpenIDConnectProviders", + "iam:listPolicies", + "iam:listPolicyVersions", + "iam:listRolePolicies", + "iam:listRoles", + "iam:listSAMLProviders", + "iam:listServerCertificates", + "iam:listSigningCertificates", + "iam:listSSHPublicKeys", + "iam:listUserPolicies", + "iam:listUsers", + "iam:listVirtualMFADevices", + "iam:simulateCustomPolicy", + "iam:simulatePrincipalPolicy", + "importexport:getStatus", + "importexport:listJobs", + "inspector:describeAssessmentRuns", + "inspector:describeAssessmentTargets", + "inspector:describeAssessmentTemplates", + "inspector:describeCrossAccountAccessRole", + "inspector:describeResourceGroups", + "inspector:describeRulesPackages", + "inspector:getTelemetryMetadata", + "inspector:listAssessmentRunAgents", + "inspector:listAssessmentRuns", + "inspector:listAssessmentTargets", + "inspector:listAssessmentTemplates", + "inspector:listEventSubscriptions", + "inspector:listRulesPackages", + "inspector:listTagsForResource", + "iot:describeAuthorizer", + "iot:describeCACertificate", + "iot:describeCertificate", + "iot:describeDefaultAuthorizer", + "iot:describeEndpoint", + "iot:describeIndex", + "iot:describeJobExecution", + "iot:describeThing", + "iot:describeThingGroup", + "iot:getEffectivePolicies", + "iot:getIndexingConfiguration", + "iot:getLoggingOptions", + "iot:getPolicy", + "iot:getPolicyVersion", + "iot:getTopicRule", + "iot:getV2LoggingOptions", + "iot:listAttachedPolicies", + "iot:listAuthorizers", + "iot:listCACertificates", + "iot:listCertificates", + "iot:listCertificatesByCA", + "iot:listJobExecutionsForJob", + "iot:listJobExecutionsForThing", + "iot:listJobs", + "iot:listOutgoingCertificates", + "iot:listPolicies", + "iot:listPolicyPrincipals", + "iot:listPolicyVersions", + "iot:listPrincipalPolicies", + "iot:listPrincipalThings", + "iot:listRoleAliases", + "iot:listTargetsForPolicy", + "iot:listThingGroups", + "iot:listThingGroupsForThing", + "iot:listThingPrincipals", + "iot:listThingRegistrationTasks", + "iot:listThings", + "iot:listThingTypes", + "iot:listTopicRules", + "iot:listV2LoggingLevels", + "kafka:describeCluster", + "kafka:getBootstrapBrokers", + "kafka:listClusters", + "kafka:listNodes", + "kinesis:describeStream", + "kinesis:listStreams", + "kinesis:listTagsForStream", + "kinesisanalytics:describeApplication", + "kinesisanalytics:listApplications", + "kms:describeKey", + "kms:getKeyPolicy", + "kms:getKeyRotationStatus", + "kms:listAliases", + "kms:listGrants", + "kms:listKeyPolicies", + "kms:listKeys", + "kms:listResourceTags", + "kms:listRetirableGrants", + "lambda:getAccountSettings", + "lambda:getAlias", + "lambda:getEventSourceMapping", + "lambda:getFunction", + "lambda:getFunctionConfiguration", + "lambda:getPolicy", + "lambda:listAliases", + "lambda:listEventSourceMappings", + "lambda:listFunctions", + "lambda:listVersionsByFunction", + "lex:getBot", + "lex:getBotAlias", + "lex:getBotAliases", + "lex:getBotChannelAssociation", + "lex:getBotChannelAssociations", + "lex:getBots", + "lex:getBotVersions", + "lex:getBuiltinIntent", + "lex:getBuiltinIntents", + "lex:getBuiltinSlotTypes", + "lex:getIntent", + "lex:getIntents", + "lex:getIntentVersions", + "lex:getSlotType", + "lex:getSlotTypes", + "lex:getSlotTypeVersions", + "lightsail:getActiveNames", + "lightsail:getBlueprints", + "lightsail:getBundles", + "lightsail:getDomain", + "lightsail:getDomains", + "lightsail:getInstance", + "lightsail:getInstanceAccessDetails", + "lightsail:getInstanceMetricData", + "lightsail:getInstancePortStates", + 
"lightsail:getInstances", + "lightsail:getInstanceSnapshot", + "lightsail:getInstanceSnapshots", + "lightsail:getInstanceState", + "lightsail:getKeyPair", + "lightsail:getKeyPairs", + "lightsail:getOperation", + "lightsail:getOperations", + "lightsail:getOperationsForResource", + "lightsail:getRegions", + "lightsail:getStaticIp", + "lightsail:getStaticIps", + "logs:describeDestinations", + "logs:describeExportTasks", + "logs:describeLogGroups", + "logs:describeLogStreams", + "logs:describeMetricFilters", + "logs:describeSubscriptionFilters", + "logs:testMetricFilter", + "machinelearning:describeBatchPredictions", + "machinelearning:describeDataSources", + "machinelearning:describeEvaluations", + "machinelearning:describeMLModels", + "machinelearning:getBatchPrediction", + "machinelearning:getDataSource", + "machinelearning:getEvaluation", + "machinelearning:getMLModel", + "mediaconvert:describeEndpoints", + "mediaconvert:getJob", + "mediaconvert:getJobTemplate", + "mediaconvert:getPreset", + "mediaconvert:getQueue", + "mediaconvert:listJobs", + "mediaconvert:listJobTemplates", + "medialive:describeChannel", + "medialive:describeInput", + "medialive:describeInputSecurityGroup", + "medialive:describeOffering", + "medialive:describeReservation", + "medialive:describeSchedule", + "medialive:listChannels", + "medialive:listInputs", + "medialive:listInputSecurityGroups", + "medialive:listOfferings", + "mediapackage:describeChannel", + "mediapackage:describeOriginEndpoint", + "mediapackage:listChannels", + "mediapackage:listOriginEndpoints", + "mediastore:describeContainer", + "mediastore:getContainerPolicy", + "mediastore:listContainers", + "mobiletargeting:getApnsChannel", + "mobiletargeting:getApplicationSettings", + "mobiletargeting:getCampaign", + "mobiletargeting:getCampaignActivities", + "mobiletargeting:getCampaigns", + "mobiletargeting:getCampaignVersion", + "mobiletargeting:getCampaignVersions", + "mobiletargeting:getEndpoint", + "mobiletargeting:getGcmChannel", + "mobiletargeting:getImportJob", + "mobiletargeting:getImportJobs", + "mobiletargeting:getSegment", + "mobiletargeting:getSegmentImportJobs", + "mobiletargeting:getSegments", + "mobiletargeting:getSegmentVersion", + "mobiletargeting:getSegmentVersions", + "mq:describeBroker", + "mq:describeConfiguration", + "mq:describeConfigurationRevision", + "mq:describeUser", + "mq:listBrokers", + "mq:listConfigurationRevisions", + "mq:listConfigurations", + "mq:listUsers", + "opsworks-cm:describeAccountAttributes", + "opsworks-cm:describeBackups", + "opsworks-cm:describeEvents", + "opsworks-cm:describeNodeAssociationStatus", + "opsworks-cm:describeServers", + "opsworks:describeAgentVersions", + "opsworks:describeApps", + "opsworks:describeCommands", + "opsworks:describeDeployments", + "opsworks:describeEcsClusters", + "opsworks:describeElasticIps", + "opsworks:describeElasticLoadBalancers", + "opsworks:describeInstances", + "opsworks:describeLayers", + "opsworks:describeLoadBasedAutoScaling", + "opsworks:describeMyUserProfile", + "opsworks:describePermissions", + "opsworks:describeRaidArrays", + "opsworks:describeRdsDbInstances", + "opsworks:describeServiceErrors", + "opsworks:describeStackProvisioningParameters", + "opsworks:describeStacks", + "opsworks:describeStackSummary", + "opsworks:describeTimeBasedAutoScaling", + "opsworks:describeUserProfiles", + "opsworks:describeVolumes", + "opsworks:getHostnameSuggestion", + "polly:describeVoices", + "polly:getLexicon", + "polly:listLexicons", + "rds:describeAccountAttributes", + 
"rds:describeCertificates", + "rds:describeDBClusterParameterGroups", + "rds:describeDBClusterParameters", + "rds:describeDBClusters", + "rds:describeDBClusterSnapshots", + "rds:describeDBEngineVersions", + "rds:describeDBInstances", + "rds:describeDBParameterGroups", + "rds:describeDBParameters", + "rds:describeDBSecurityGroups", + "rds:describeDBSnapshotAttributes", + "rds:describeDBSnapshots", + "rds:describeDBSubnetGroups", + "rds:describeEngineDefaultClusterParameters", + "rds:describeEngineDefaultParameters", + "rds:describeEventCategories", + "rds:describeEvents", + "rds:describeEventSubscriptions", + "rds:describeOptionGroupOptions", + "rds:describeOptionGroups", + "rds:describeOrderableDBInstanceOptions", + "rds:describePendingMaintenanceActions", + "rds:describeReservedDBInstances", + "rds:describeReservedDBInstancesOfferings", + "rds:listTagsForResource", + "redshift:describeClusterParameterGroups", + "redshift:describeClusterParameters", + "redshift:describeClusters", + "redshift:describeClusterSecurityGroups", + "redshift:describeClusterSnapshots", + "redshift:describeClusterSubnetGroups", + "redshift:describeClusterVersions", + "redshift:describeDefaultClusterParameters", + "redshift:describeEventCategories", + "redshift:describeEvents", + "redshift:describeEventSubscriptions", + "redshift:describeHsmClientCertificates", + "redshift:describeHsmConfigurations", + "redshift:describeLoggingStatus", + "redshift:describeOrderableClusterOptions", + "redshift:describeReservedNodeOfferings", + "redshift:describeReservedNodes", + "redshift:describeResize", + "redshift:describeSnapshotCopyGrants", + "redshift:describeTableRestoreStatus", + "redshift:describeTags", + "rekognition:listCollections", + "rekognition:listFaces", + "robomaker:describeDeploymentJob", + "robomaker:describeFleet", + "robomaker:describeRobotApplication", + "robomaker:describeSimulationApplication", + "robomaker:describeSimulationJob", + "robomaker:listDeploymentJobs", + "robomaker:listFleets", + "robomaker:listRobotApplications", + "robomaker:listRobots", + "robomaker:listSimulationApplications", + "robomaker:listSimulationJobs", + "route53:getChange", + "route53:getCheckerIpRanges", + "route53:getGeoLocation", + "route53:getHealthCheck", + "route53:getHealthCheckCount", + "route53:getHealthCheckLastFailureReason", + "route53:getHealthCheckStatus", + "route53:getHostedZone", + "route53:getHostedZoneCount", + "route53:getReusableDelegationSet", + "route53:getTrafficPolicy", + "route53:getTrafficPolicyInstance", + "route53:getTrafficPolicyInstanceCount", + "route53:listGeoLocations", + "route53:listHealthChecks", + "route53:listHostedZones", + "route53:listHostedZonesByName", + "route53:listResourceRecordSets", + "route53:listReusableDelegationSets", + "route53:listTagsForResource", + "route53:listTagsForResources", + "route53:listTrafficPolicies", + "route53:listTrafficPolicyInstances", + "route53:listTrafficPolicyInstancesByHostedZone", + "route53:listTrafficPolicyInstancesByPolicy", + "route53:listTrafficPolicyVersions", + "route53domains:checkDomainAvailability", + "route53domains:getContactReachabilityStatus", + "route53domains:getDomainDetail", + "route53domains:getOperationDetail", + "route53domains:listDomains", + "route53domains:listOperations", + "route53domains:listTagsForDomain", + "route53domains:viewBilling", + "route53resolver:getResolverRulePolicy", + "route53resolver:listResolverEndpointIpAddresses", + "route53resolver:listResolverEndpoints", + "route53resolver:listResolverRuleAssociations", + 
"route53resolver:listResolverRules", + "route53resolver:listTagsForResource", + "s3:getAccelerateConfiguration", + "s3:getAnalyticsConfiguration", + "s3:getBucketAcl", + "s3:getBucketCORS", + "s3:getBucketLocation", + "s3:getBucketLogging", + "s3:getBucketNotification", + "s3:getBucketPolicy", + "s3:getBucketRequestPayment", + "s3:getBucketTagging", + "s3:getBucketVersioning", + "s3:getBucketWebsite", + "s3:getEncryptionConfiguration", + "s3:getInventoryConfiguration", + "s3:getLifecycleConfiguration", + "s3:getMetricsConfiguration", + "s3:getReplicationConfiguration", + "s3:headBucket", + "s3:listAllMyBuckets", + "s3:listBucketMultipartUploads", + "sagemaker:describeEndpoint", + "sagemaker:describeEndpointConfig", + "sagemaker:describeHyperParameterTuningJob", + "sagemaker:describeModel", + "sagemaker:describeNotebookInstance", + "sagemaker:describeNotebookInstanceLifecycleConfig", + "sagemaker:describeTrainingJob", + "sagemaker:describeTransformJob", + "sagemaker:listEndpointConfigs", + "sagemaker:listEndpoints", + "sagemaker:listHyperParameterTuningJobs", + "sagemaker:listModels", + "sagemaker:listNotebookInstanceLifecycleConfigs", + "sagemaker:listNotebookInstances", + "sagemaker:listTags", + "sagemaker:listTrainingJobs", + "sagemaker:listTrainingJobsForHyperParameterTuningJob", + "sagemaker:listTransformJobs", + "sdb:domainMetadata", + "sdb:listDomains", + "secretsmanager:describeSecret", + "secretsmanager:getResourcePolicy", + "secretsmanager:listSecrets", + "secretsmanager:listSecretVersionIds", + "servicecatalog:describeConstraint", + "servicecatalog:describePortfolio", + "servicecatalog:describeProduct", + "servicecatalog:describeProductAsAdmin", + "servicecatalog:describeProductView", + "servicecatalog:describeProvisioningArtifact", + "servicecatalog:describeProvisioningParameters", + "servicecatalog:describeRecord", + "servicecatalog:listAcceptedPortfolioShares", + "servicecatalog:listConstraintsForPortfolio", + "servicecatalog:listLaunchPaths", + "servicecatalog:listPortfolioAccess", + "servicecatalog:listPortfolios", + "servicecatalog:listPortfoliosForProduct", + "servicecatalog:listPrincipalsForPortfolio", + "servicecatalog:listProvisioningArtifacts", + "servicecatalog:listRecordHistory", + "servicecatalog:scanProvisionedProducts", + "servicecatalog:searchProducts", + "ses:describeActiveReceiptRuleSet", + "ses:describeReceiptRule", + "ses:describeReceiptRuleSet", + "ses:getIdentityDkimAttributes", + "ses:getIdentityMailFromDomainAttributes", + "ses:getIdentityNotificationAttributes", + "ses:getIdentityPolicies", + "ses:getIdentityVerificationAttributes", + "ses:getSendQuota", + "ses:getSendStatistics", + "ses:listIdentities", + "ses:listIdentityPolicies", + "ses:listReceiptFilters", + "ses:listReceiptRuleSets", + "ses:listVerifiedEmailAddresses", + "shield:describeAttack", + "shield:describeProtection", + "shield:describeSubscription", + "shield:listAttacks", + "shield:listProtections", + "sms:getConnectors", + "sms:getReplicationJobs", + "sms:getReplicationRuns", + "sms:getServers", + "snowball:describeAddress", + "snowball:describeAddresses", + "snowball:describeJob", + "snowball:getSnowballUsage", + "snowball:listJobs", + "sns:checkIfPhoneNumberIsOptedOut", + "sns:getEndpointAttributes", + "sns:getPlatformApplicationAttributes", + "sns:getSMSAttributes", + "sns:getSubscriptionAttributes", + "sns:getTopicAttributes", + "sns:listEndpointsByPlatformApplication", + "sns:listPhoneNumbersOptedOut", + "sns:listPlatformApplications", + "sns:listSubscriptions", + 
"sns:listSubscriptionsByTopic", + "sns:listTopics", + "sqs:getQueueAttributes", + "sqs:getQueueUrl", + "sqs:listDeadLetterSourceQueues", + "sqs:listQueues", + "ssm:describeActivations", + "ssm:describeAssociation", + "ssm:describeAutomationExecutions", + "ssm:describeAvailablePatches", + "ssm:describeDocument", + "ssm:describeDocumentPermission", + "ssm:describeEffectiveInstanceAssociations", + "ssm:describeEffectivePatchesForPatchBaseline", + "ssm:describeInstanceAssociationsStatus", + "ssm:describeInstanceInformation", + "ssm:describeInstancePatches", + "ssm:describeInstancePatchStates", + "ssm:describeInstancePatchStatesForPatchGroup", + "ssm:describeMaintenanceWindowExecutions", + "ssm:describeMaintenanceWindowExecutionTaskInvocations", + "ssm:describeMaintenanceWindowExecutionTasks", + "ssm:describeMaintenanceWindows", + "ssm:describeMaintenanceWindowTargets", + "ssm:describeMaintenanceWindowTasks", + "ssm:describeParameters", + "ssm:describePatchBaselines", + "ssm:describePatchGroups", + "ssm:describePatchGroupState", + "ssm:getAutomationExecution", + "ssm:getCommandInvocation", + "ssm:getDefaultPatchBaseline", + "ssm:getDeployablePatchSnapshotForInstance", + "ssm:getDocument", + "ssm:getInventory", + "ssm:getInventorySchema", + "ssm:getMaintenanceWindow", + "ssm:getMaintenanceWindowExecution", + "ssm:getMaintenanceWindowExecutionTask", + "ssm:getParameterHistory", + "ssm:getParameters", + "ssm:getPatchBaseline", + "ssm:getPatchBaselineForPatchGroup", + "ssm:listAssociations", + "ssm:listCommandInvocations", + "ssm:listCommands", + "ssm:listDocuments", + "ssm:listDocumentVersions", + "ssm:listInventoryEntries", + "ssm:listTagsForResource", + "states:describeActivity", + "states:describeExecution", + "states:describeStateMachine", + "states:getExecutionHistory", + "states:listActivities", + "states:listExecutions", + "states:listStateMachines", + "storagegateway:describeBandwidthRateLimit", + "storagegateway:describeCache", + "storagegateway:describeCachediSCSIVolumes", + "storagegateway:describeGatewayInformation", + "storagegateway:describeMaintenanceStartTime", + "storagegateway:describeNFSFileShares", + "storagegateway:describeSMBFileShares", + "storagegateway:describeSMBSettings", + "storagegateway:describeSnapshotSchedule", + "storagegateway:describeStorediSCSIVolumes", + "storagegateway:describeTapeArchives", + "storagegateway:describeTapeRecoveryPoints", + "storagegateway:describeTapes", + "storagegateway:describeUploadBuffer", + "storagegateway:describeVTLDevices", + "storagegateway:describeWorkingStorage", + "storagegateway:listFileShares", + "storagegateway:listGateways", + "storagegateway:listLocalDisks", + "storagegateway:listTagsForResource", + "storagegateway:listTapes", + "storagegateway:listVolumeInitiators", + "storagegateway:listVolumeRecoveryPoints", + "storagegateway:listVolumes", + "swf:describeActivityType", + "swf:describeDomain", + "swf:describeWorkflowExecution", + "swf:describeWorkflowType", + "swf:getWorkflowExecutionHistory", + "swf:listActivityTypes", + "swf:listClosedWorkflowExecutions", + "swf:listDomains", + "swf:listOpenWorkflowExecutions", + "swf:listWorkflowTypes", + "transfer:describeServer", + "transfer:describeUser", + "transfer:listServers", + "transfer:listTagsForResource", + "transfer:listUsers", + "waf-regional:getByteMatchSet", + "waf-regional:getChangeTokenStatus", + "waf-regional:getIPSet", + "waf-regional:getRule", + "waf-regional:getSqlInjectionMatchSet", + "waf-regional:getWebACL", + "waf-regional:getWebACLForResource", + 
"waf-regional:listByteMatchSets", + "waf-regional:listIPSets", + "waf-regional:listResourcesForWebACL", + "waf-regional:listRules", + "waf-regional:listSqlInjectionMatchSets", + "waf-regional:listWebACLs", + "waf:getByteMatchSet", + "waf:getChangeTokenStatus", + "waf:getIPSet", + "waf:getRule", + "waf:getSampledRequests", + "waf:getSizeConstraintSet", + "waf:getSqlInjectionMatchSet", + "waf:getWebACL", + "waf:getXssMatchSet", + "waf:listByteMatchSets", + "waf:listIPSets", + "waf:listRules", + "waf:listSizeConstraintSets", + "waf:listSqlInjectionMatchSets", + "waf:listWebACLs", + "waf:listXssMatchSets", + "workdocs:checkAlias", + "workdocs:describeAvailableDirectories", + "workdocs:describeInstances", + "workspaces:describeTags", + "workspaces:describeWorkspaceBundles", + "workspaces:describeWorkspaceDirectories", + "workspaces:describeWorkspaces", + "workspaces:describeWorkspacesConnectionStatus" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": false, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ7W6266ELXF5MISDS", + "PolicyName": "AWSSupportServiceRolePolicy", + "UpdateDate": "2019-02-06T18:06:11+00:00", + "VersionId": "v4" + }, + "AWSTransferLoggingAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/AWSTransferLoggingAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-14T15:32:50+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:CreateLogGroup", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAISIP5WGJX7VKXRQZO", + "PolicyName": "AWSTransferLoggingAccess", + "UpdateDate": "2019-01-14T15:32:50+00:00", + "VersionId": "v1" + }, + "AWSTrustedAdvisorServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSTrustedAdvisorServiceRolePolicy", + "AttachmentCount": 1, + "CreateDate": "2018-02-22T21:24:25+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "autoscaling:DescribeAccountLimits", + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "cloudformation:DescribeAccountLimits", + "cloudformation:DescribeStacks", + "cloudformation:ListStacks", + "cloudfront:ListDistributions", + "cloudtrail:DescribeTrails", + "cloudtrail:GetTrailStatus", + "dynamodb:DescribeLimits", + "dynamodb:DescribeTable", + "dynamodb:ListTables", + "ec2:DescribeAddresses", + "ec2:DescribeReservedInstances", + "ec2:DescribeInstances", + "ec2:DescribeVpcs", + "ec2:DescribeInternetGateways", + "ec2:DescribeImages", + "ec2:DescribeVolumes", + "ec2:DescribeSecurityGroups", + "ec2:DescribeReservedInstancesOfferings", + "ec2:DescribeSnapshots", + "ec2:DescribeVpnConnections", + "ec2:DescribeVpnGateways", + "ec2:DescribeLaunchTemplateVersions", + "elasticloadbalancing:DescribeInstanceHealth", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancerPolicyTypes", + "elasticloadbalancing:DescribeLoadBalancers", + "iam:GenerateCredentialReport", + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary", + "iam:GetCredentialReport", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + 
"kinesis:DescribeLimits", + "rds:DescribeAccountAttributes", + "rds:DescribeDBClusters", + "rds:DescribeDBEngineVersions", + "rds:DescribeDBInstances", + "rds:DescribeDBParameterGroups", + "rds:DescribeDBParameters", + "rds:DescribeDBSecurityGroups", + "rds:DescribeDBSnapshots", + "rds:DescribeDBSubnetGroups", + "rds:DescribeEngineDefaultParameters", + "rds:DescribeEvents", + "rds:DescribeOptionGroupOptions", + "rds:DescribeOptionGroups", + "rds:DescribeOrderableDBInstanceOptions", + "rds:DescribeReservedDBInstances", + "rds:DescribeReservedDBInstancesOfferings", + "rds:ListTagsForResource", + "redshift:DescribeClusters", + "redshift:DescribeReservedNodeOfferings", + "redshift:DescribeReservedNodes", + "route53:GetAccountLimit", + "route53:GetHealthCheck", + "route53:GetHostedZone", + "route53:ListHealthChecks", + "route53:ListHostedZones", + "route53:ListHostedZonesByName", + "route53:ListResourceRecordSets", + "s3:GetBucketAcl", + "s3:GetBucketPolicy", + "s3:GetBucketPolicyStatus", + "s3:GetBucketLocation", + "s3:GetBucketLogging", + "s3:GetBucketVersioning", + "s3:GetBucketPublicAccessBlock", + "s3:ListBucket", + "s3:ListObjects", + "s3:ListAllMyBuckets", + "ses:GetSendQuota", + "sqs:ListQueues", + "cloudwatch:GetMetricStatistics" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJH4QJ2WMHBOB47BUE", + "PolicyName": "AWSTrustedAdvisorServiceRolePolicy", + "UpdateDate": "2019-01-22T19:58:36+00:00", + "VersionId": "v5" + }, + "AWSVPCTransitGatewayServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AWSVPCTransitGatewayServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-11-26T16:21:17+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DeleteNetworkInterface", + "ec2:CreateNetworkInterfacePermission" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "0" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJS2PBJSYV2EZW3MIQ", + "PolicyName": "AWSVPCTransitGatewayServiceRolePolicy", + "UpdateDate": "2018-11-26T16:21:17+00:00", + "VersionId": "v1" + }, "AWSWAFFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSWAFFullAccess", "AttachmentCount": 0, - "CreateDate": "2016-12-07T21:33:25+00:00", + "CreateDate": "2015-10-06T20:44:00+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -5364,6 +13684,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJMIKIAFXZEGOLRH7C", "PolicyName": "AWSWAFFullAccess", "UpdateDate": "2016-12-07T21:33:25+00:00", @@ -5372,7 +13693,7 @@ aws_managed_policies_data = """ "AWSWAFReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AWSWAFReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2016-12-07T21:30:54+00:00", + "CreateDate": "2015-10-06T20:43:45+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -5392,11 +13713,44 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAINZVDMX2SBF7EU2OC", "PolicyName": "AWSWAFReadOnlyAccess", "UpdateDate": 
"2016-12-07T21:30:54+00:00", "VersionId": "v2" }, + "AWSXRayDaemonWriteAccess": { + "Arn": "arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess", + "AttachmentCount": 0, + "CreateDate": "2018-08-28T23:00:33+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "xray:PutTraceSegments", + "xray:PutTelemetryRecords", + "xray:GetSamplingRules", + "xray:GetSamplingTargets", + "xray:GetSamplingStatisticSummaries" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIOE47HSUE5AVBNEDM", + "PolicyName": "AWSXRayDaemonWriteAccess", + "UpdateDate": "2018-08-28T23:00:33+00:00", + "VersionId": "v1" + }, "AWSXrayFullAccess": { "Arn": "arn:aws:iam::aws:policy/AWSXrayFullAccess", "AttachmentCount": 0, @@ -5419,6 +13773,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJQBYG45NSJMVQDB2K", "PolicyName": "AWSXrayFullAccess", "UpdateDate": "2016-12-01T18:30:55+00:00", @@ -5428,15 +13783,21 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AWSXrayReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2016-12-01T18:27:02+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v4", "Document": { "Statement": [ { "Action": [ + "xray:GetSamplingRules", + "xray:GetSamplingTargets", + "xray:GetSamplingStatisticSummaries", "xray:BatchGetTraces", "xray:GetServiceGraph", "xray:GetTraceGraph", - "xray:GetTraceSummaries" + "xray:GetTraceSummaries", + "xray:GetGroups", + "xray:GetGroup", + "xray:GetTimeSeriesServiceStatistics" ], "Effect": "Allow", "Resource": [ @@ -5449,22 +13810,26 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIH4OFXWPS6ZX6OPGQ", "PolicyName": "AWSXrayReadOnlyAccess", - "UpdateDate": "2016-12-01T18:27:02+00:00", - "VersionId": "v1" + "UpdateDate": "2019-04-30T18:11:46+00:00", + "VersionId": "v4" }, "AWSXrayWriteOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess", "AttachmentCount": 0, "CreateDate": "2016-12-01T18:19:53+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ "xray:PutTraceSegments", - "xray:PutTelemetryRecords" + "xray:PutTelemetryRecords", + "xray:GetSamplingRules", + "xray:GetSamplingTargets", + "xray:GetSamplingStatisticSummaries" ], "Effect": "Allow", "Resource": [ @@ -5477,14 +13842,15 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIAACM4LMYSRGBCTM6", "PolicyName": "AWSXrayWriteOnlyAccess", - "UpdateDate": "2016-12-01T18:19:53+00:00", - "VersionId": "v1" + "UpdateDate": "2018-08-28T23:03:04+00:00", + "VersionId": "v2" }, "AdministratorAccess": { "Arn": "arn:aws:iam::aws:policy/AdministratorAccess", - "AttachmentCount": 3, + "AttachmentCount": 1, "CreateDate": "2015-02-06T18:39:46+00:00", "DefaultVersionId": "v1", "Document": { @@ -5500,14 +13866,242 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIWMBCKSKIEE64ZLYK", "PolicyName": "AdministratorAccess", "UpdateDate": "2015-02-06T18:39:46+00:00", "VersionId": "v1" }, + "AlexaForBusinessDeviceSetup": { + "Arn": 
"arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup", + "AttachmentCount": 0, + "CreateDate": "2017-11-30T16:47:16+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "a4b:RegisterDevice", + "a4b:CompleteRegistration", + "a4b:SearchDevices", + "a4b:SearchNetworkProfiles", + "a4b:GetNetworkProfile", + "a4b:PutDeviceSetupEvents" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "secretsmanager:GetSecretValue" + ], + "Effect": "Allow", + "Resource": "arn:aws:secretsmanager:*:*:secret:A4BNetworkProfile*", + "Sid": "A4bDeviceSetupAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIUEFZFUTDTY4HGFU2", + "PolicyName": "AlexaForBusinessDeviceSetup", + "UpdateDate": "2019-05-20T21:05:39+00:00", + "VersionId": "v2" + }, + "AlexaForBusinessFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AlexaForBusinessFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-30T16:47:09+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "a4b:*", + "kms:DescribeKey" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringLike": { + "iam:AWSServiceName": [ + "*a4b.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/*a4b.amazonaws.com/AWSServiceRoleForAlexaForBusiness*" + }, + { + "Action": [ + "secretsmanager:GetSecretValue", + "secretsmanager:DeleteSecret", + "secretsmanager:UpdateSecret" + ], + "Effect": "Allow", + "Resource": "arn:aws:secretsmanager:*:*:secret:A4BNetworkProfile*" + }, + { + "Action": "secretsmanager:CreateSecret", + "Condition": { + "StringLike": { + "secretsmanager:Name": "A4BNetworkProfile*" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAILUT3JGG7WRIMVNH2", + "PolicyName": "AlexaForBusinessFullAccess", + "UpdateDate": "2019-05-20T21:32:33+00:00", + "VersionId": "v4" + }, + "AlexaForBusinessGatewayExecution": { + "Arn": "arn:aws:iam::aws:policy/AlexaForBusinessGatewayExecution", + "AttachmentCount": 0, + "CreateDate": "2017-11-30T16:47:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "a4b:Send*", + "a4b:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws:a4b:*:*:gateway/*" + }, + { + "Action": [ + "sqs:ReceiveMessage", + "sqs:DeleteMessage" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:sqs:*:*:dd-*", + "arn:aws:sqs:*:*:sd-*" + ] + }, + { + "Action": [ + "a4b:List*", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI3LZ7YP7KHLG4DT2Q", + "PolicyName": "AlexaForBusinessGatewayExecution", + "UpdateDate": "2017-11-30T16:47:19+00:00", + "VersionId": "v1" + }, + "AlexaForBusinessNetworkProfileServicePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AlexaForBusinessNetworkProfileServicePolicy", + "AttachmentCount": 0, + "CreateDate": 
"2019-03-13T00:53:40+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "acm-pca:GetCertificate", + "acm-pca:IssueCertificate", + "acm-pca:RevokeCertificate" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/a4b": "enabled" + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "A4bPcaTagAccess" + }, + { + "Action": [ + "secretsmanager:GetSecretValue" + ], + "Effect": "Allow", + "Resource": "arn:aws:secretsmanager:*:*:secret:A4BNetworkProfile*", + "Sid": "A4bNetworkProfileAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI7GYBNGIZU2EDSMGQ", + "PolicyName": "AlexaForBusinessNetworkProfileServicePolicy", + "UpdateDate": "2019-04-05T21:57:56+00:00", + "VersionId": "v2" + }, + "AlexaForBusinessReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AlexaForBusinessReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-30T16:47:12+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "a4b:Get*", + "a4b:List*", + "a4b:Describe*", + "a4b:Search*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI6BKSTB4XMLPBFFJ2", + "PolicyName": "AlexaForBusinessReadOnlyAccess", + "UpdateDate": "2018-06-25T23:52:33+00:00", + "VersionId": "v2" + }, "AmazonAPIGatewayAdministrator": { "Arn": "arn:aws:iam::aws:policy/AmazonAPIGatewayAdministrator", - "AttachmentCount": 0, + "AttachmentCount": 1, "CreateDate": "2015-07-09T17:34:45+00:00", "DefaultVersionId": "v1", "Document": { @@ -5525,6 +14119,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ4PT6VY5NLKTNUYSI", "PolicyName": "AmazonAPIGatewayAdministrator", "UpdateDate": "2015-07-09T17:34:45+00:00", @@ -5534,12 +14129,13 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AmazonAPIGatewayInvokeFullAccess", "AttachmentCount": 0, "CreateDate": "2015-07-09T17:36:12+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ - "execute-api:Invoke" + "execute-api:Invoke", + "execute-api:ManageConnections" ], "Effect": "Allow", "Resource": "arn:aws:execute-api:*:*:*" @@ -5550,14 +14146,15 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIIWAX2NOOQJ4AIEQ6", "PolicyName": "AmazonAPIGatewayInvokeFullAccess", - "UpdateDate": "2015-07-09T17:36:12+00:00", - "VersionId": "v1" + "UpdateDate": "2018-12-18T18:25:10+00:00", + "VersionId": "v2" }, "AmazonAPIGatewayPushToCloudWatchLogs": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonAPIGatewayPushToCloudWatchLogs", - "AttachmentCount": 0, + "AttachmentCount": 1, "CreateDate": "2015-11-11T23:41:46+00:00", "DefaultVersionId": "v1", "Document": { @@ -5581,6 +14178,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIK4GFO7HLKYN64ASK", "PolicyName": "AmazonAPIGatewayPushToCloudWatchLogs", "UpdateDate": "2015-11-11T23:41:46+00:00", @@ -5589,8 +14187,8 @@ aws_managed_policies_data = """ "AmazonAppStreamFullAccess": { "Arn": 
"arn:aws:iam::aws:policy/AmazonAppStreamFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-09-07T23:56:23+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2015-02-06T18:40:09+00:00", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -5645,6 +14243,16 @@ aws_managed_policies_data = """ }, "Effect": "Allow", "Resource": "arn:aws:iam::*:role/service-role/ApplicationAutoScalingForAmazonAppStreamAccess" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "appstream.application-autoscaling.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/appstream.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_AppStreamFleet" } ], "Version": "2012-10-17" @@ -5652,15 +14260,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJLZZXU2YQVGL4QDNC", "PolicyName": "AmazonAppStreamFullAccess", - "UpdateDate": "2017-09-07T23:56:23+00:00", - "VersionId": "v2" + "UpdateDate": "2018-09-10T17:29:25+00:00", + "VersionId": "v3" }, "AmazonAppStreamReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonAppStreamReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2016-12-07T21:00:06+00:00", + "CreateDate": "2015-02-06T18:40:10+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -5679,6 +14288,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJXIFDGB4VBX23DX7K", "PolicyName": "AmazonAppStreamReadOnlyAccess", "UpdateDate": "2016-12-07T21:00:06+00:00", @@ -5687,8 +14297,8 @@ aws_managed_policies_data = """ "AmazonAppStreamServiceAccess": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonAppStreamServiceAccess", "AttachmentCount": 0, - "CreateDate": "2017-05-23T23:00:47+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2016-11-19T04:17:37+00:00", + "DefaultVersionId": "v5", "Document": { "Statement": [ { @@ -5703,7 +14313,8 @@ aws_managed_policies_data = """ "ec2:AssociateAddress", "ec2:DisassociateAddress", "ec2:DescribeRouteTables", - "ec2:DescribeSecurityGroups" + "ec2:DescribeSecurityGroups", + "s3:ListAllMyBuckets" ], "Effect": "Allow", "Resource": "*" @@ -5717,10 +14328,15 @@ aws_managed_policies_data = """ "s3:DeleteObject", "s3:GetObjectVersion", "s3:DeleteObjectVersion", - "s3:PutBucketPolicy" + "s3:PutBucketPolicy", + "s3:PutEncryptionConfiguration" ], "Effect": "Allow", - "Resource": "arn:aws:s3:::appstream2-36fb080bb8-*" + "Resource": [ + "arn:aws:s3:::appstream2-36fb080bb8-*", + "arn:aws:s3:::appstream-app-settings-*", + "arn:aws:s3:::appstream-logs-*" + ] } ], "Version": "2012-10-17" @@ -5728,16 +14344,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAISBRZ7LMMCBYEF3SE", "PolicyName": "AmazonAppStreamServiceAccess", - "UpdateDate": "2017-05-23T23:00:47+00:00", - "VersionId": "v3" + "UpdateDate": "2019-01-17T20:22:45+00:00", + "VersionId": "v5" }, "AmazonAthenaFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonAthenaFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-09-13T00:13:48+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2016-11-30T16:46:01+00:00", + "DefaultVersionId": "v5", "Document": { "Statement": [ { @@ -5794,12 +14411,45 @@ aws_managed_policies_data = """ }, { "Action": [ - "s3:GetObject" + 
"s3:GetObject", + "s3:ListBucket" ], "Effect": "Allow", "Resource": [ "arn:aws:s3:::athena-examples*" ] + }, + { + "Action": [ + "s3:ListBucket", + "s3:GetBucketLocation", + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "sns:ListTopics", + "sns:GetTopicAttributes" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "cloudwatch:PutMetricAlarm", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] } ], "Version": "2012-10-17" @@ -5807,10 +14457,156 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIPJMLMD4C7RYZ6XCK", "PolicyName": "AmazonAthenaFullAccess", - "UpdateDate": "2017-09-13T00:13:48+00:00", - "VersionId": "v3" + "UpdateDate": "2019-02-19T00:13:03+00:00", + "VersionId": "v5" + }, + "AmazonChimeFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonChimeFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-01T22:15:43+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "chime:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIUJFSAKUERNORYRWO", + "PolicyName": "AmazonChimeFullAccess", + "UpdateDate": "2017-11-01T22:15:43+00:00", + "VersionId": "v1" + }, + "AmazonChimeReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AmazonChimeReadOnly", + "AttachmentCount": 0, + "CreateDate": "2017-11-01T22:04:17+00:00", + "DefaultVersionId": "v6", + "Document": { + "Statement": [ + { + "Action": [ + "chime:ListAccounts", + "chime:GetAccount", + "chime:GetAccountSettings", + "chime:ListUsers", + "chime:GetUser", + "chime:GetUserByEmail", + "chime:ListDomains", + "chime:GetDomain", + "chime:ListGroups", + "chime:ListDirectories", + "chime:ListCDRBucket", + "chime:GetCDRBucket", + "chime:ListDelegates", + "chime:GetAccountResource", + "chime:ValidateDelegate", + "chime:ListAccountUsageReportData", + "chime:GetUserActivityReportData", + "chime:GetGlobalSettings", + "chime:GetPhoneNumber", + "chime:GetPhoneNumberOrder", + "chime:GetUserSettings", + "chime:GetVoiceConnector", + "chime:GetVoiceConnectorOrigination", + "chime:GetVoiceConnectorTermination", + "chime:GetVoiceConnectorTerminationHealth", + "chime:ListPhoneNumberOrders", + "chime:ListPhoneNumbers", + "chime:ListVoiceConnectorTerminationCredentials", + "chime:ListVoiceConnectors", + "chime:SearchAvailablePhoneNumbers", + "chime:GetTelephonyLimits", + "chime:ListCallingRegions", + "chime:GetBot", + "chime:ListBots", + "chime:GetEventsConfiguration" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJLBFZZFABRXVWRTCI", + "PolicyName": "AmazonChimeReadOnly", + "UpdateDate": "2019-05-13T20:34:08+00:00", + "VersionId": "v6" + }, + "AmazonChimeUserManagement": { + "Arn": "arn:aws:iam::aws:policy/AmazonChimeUserManagement", + "AttachmentCount": 0, + "CreateDate": "2017-11-01T22:17:26+00:00", + "DefaultVersionId": "v6", + "Document": { + "Statement": [ + { + "Action": [ + "chime:ListAccounts", + "chime:GetAccount", + "chime:GetAccountSettings", + "chime:UpdateAccountSettings", + "chime:ListUsers", + "chime:GetUser", + "chime:GetUserByEmail", + "chime:InviteUsers", + 
"chime:SuspendUsers", + "chime:ActivateUsers", + "chime:UpdateUserLicenses", + "chime:ResetPersonalPIN", + "chime:LogoutUser", + "chime:ListDomains", + "chime:GetDomain", + "chime:ListDirectories", + "chime:ListGroups", + "chime:SubmitSupportRequest", + "chime:ListDelegates", + "chime:ListAccountUsageReportData", + "chime:GetMeetingDetail", + "chime:ListMeetingEvents", + "chime:ListMeetingsReportData", + "chime:GetUserActivityReportData", + "chime:UpdateUser", + "chime:BatchUpdateUser", + "chime:BatchSuspendUser", + "chime:BatchUnsuspendUser", + "chime:AssociatePhoneNumberWithUser", + "chime:DisassociatePhoneNumberFromUser", + "chime:GetPhoneNumber", + "chime:ListPhoneNumbers", + "chime:GetUserSettings", + "chime:UpdateUserSettings" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJGLHVUHNMQPSDGSOO", + "PolicyName": "AmazonChimeUserManagement", + "UpdateDate": "2019-03-18T12:17:58+00:00", + "VersionId": "v6" }, "AmazonCloudDirectoryFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonCloudDirectoryFullAccess", @@ -5834,6 +14630,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJG3XQK77ATFLCF2CK", "PolicyName": "AmazonCloudDirectoryFullAccess", "UpdateDate": "2017-02-25T00:41:39+00:00", @@ -5864,6 +14661,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAICMSZQGR3O62KMD6M", "PolicyName": "AmazonCloudDirectoryReadOnlyAccess", "UpdateDate": "2017-02-28T23:42:06+00:00", @@ -5892,16 +14690,51 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIQOKZ5BGKLCMTXH4W", "PolicyName": "AmazonCognitoDeveloperAuthenticatedIdentities", "UpdateDate": "2015-03-24T17:22:23+00:00", "VersionId": "v1" }, + "AmazonCognitoIdpEmailServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonCognitoIdpEmailServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2019-03-21T21:32:25+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ses:SendEmail", + "ses:SendRawEmail" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ses:List*" + ], + "Effect": "Deny", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIX7PW362PLAQFKBHM", + "PolicyName": "AmazonCognitoIdpEmailServiceRolePolicy", + "UpdateDate": "2019-03-21T21:32:25+00:00", + "VersionId": "v1" + }, "AmazonCognitoPowerUser": { "Arn": "arn:aws:iam::aws:policy/AmazonCognitoPowerUser", "AttachmentCount": 0, - "CreateDate": "2016-06-02T16:57:56+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2015-03-24T17:14:56+00:00", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -5915,6 +14748,24 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "email.cognito-idp.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Effect": "Allow", + 
"Resource": "arn:aws:iam::*:role/aws-service-role/email.cognito-idp.amazonaws.com/AWSServiceRoleForAmazonCognitoIdpEmail*" } ], "Version": "2012-10-17" @@ -5922,16 +14773,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJKW5H2HNCPGCYGR6Y", "PolicyName": "AmazonCognitoPowerUser", - "UpdateDate": "2016-06-02T16:57:56+00:00", - "VersionId": "v2" + "UpdateDate": "2019-03-29T22:06:46+00:00", + "VersionId": "v3" }, "AmazonCognitoReadOnly": { "Arn": "arn:aws:iam::aws:policy/AmazonCognitoReadOnly", "AttachmentCount": 0, - "CreateDate": "2016-06-02T17:30:24+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2015-03-24T17:06:46+00:00", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -5941,6 +14793,7 @@ aws_managed_policies_data = """ "cognito-identity:List*", "cognito-idp:Describe*", "cognito-idp:AdminGetUser", + "cognito-idp:AdminList*", "cognito-idp:List*", "cognito-sync:Describe*", "cognito-sync:Get*", @@ -5958,9 +14811,141 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJBFTRZD2GQGJHSVQK", "PolicyName": "AmazonCognitoReadOnly", - "UpdateDate": "2016-06-02T17:30:24+00:00", + "UpdateDate": "2019-02-16T00:18:11+00:00", + "VersionId": "v3" + }, + "AmazonConnectFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonConnectFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-10-17T20:59:39+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "connect:*", + "ds:CreateAlias", + "ds:AuthorizeApplication", + "ds:CreateIdentityPoolDirectory", + "ds:DeleteDirectory", + "ds:DescribeDirectories", + "ds:UnauthorizeApplication", + "firehose:DescribeDeliveryStream", + "firehose:ListDeliveryStreams", + "kinesis:DescribeStream", + "kinesis:ListStreams", + "kms:DescribeKey", + "kms:CreateGrant", + "kms:ListAliases", + "lex:GetBots", + "logs:CreateLogGroup", + "s3:CreateBucket", + "s3:GetBucketLocation", + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "connect.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:PutRolePolicy" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/connect.amazonaws.com/AWSServiceRoleForAmazonConnect*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIPZZCFFD55NYGBAJI", + "PolicyName": "AmazonConnectFullAccess", + "UpdateDate": "2018-10-17T22:28:01+00:00", + "VersionId": "v2" + }, + "AmazonConnectReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonConnectReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-10-17T21:00:44+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "connect:Get*", + "connect:Describe*", + "connect:List*", + "ds:DescribeDirectories" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "connect:GetFederationTokens", + "Effect": "Deny", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIVZMH7VU6YYKRY6ZU", + "PolicyName": "AmazonConnectReadOnlyAccess", + "UpdateDate": 
"2018-10-17T21:00:44+00:00", + "VersionId": "v1" + }, + "AmazonConnectServiceLinkedRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonConnectServiceLinkedRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-09-07T00:21:43+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "connect:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:DeleteRole" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/connect.amazonaws.com/AWSServiceRoleForAmazonConnect_*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ6R6FMTSRUJSKI72Y", + "PolicyName": "AmazonConnectServiceLinkedRolePolicy", + "UpdateDate": "2018-09-25T21:29:18+00:00", "VersionId": "v2" }, "AmazonDMSCloudWatchLogsRole": { @@ -6026,6 +15011,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJBG7UXZZXUJD3TDJE", "PolicyName": "AmazonDMSCloudWatchLogsRole", "UpdateDate": "2016-01-07T23:44:53+00:00", @@ -6061,6 +15047,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI3CCUQ4U5WNC5F6B6", "PolicyName": "AmazonDMSRedshiftS3Role", "UpdateDate": "2016-04-20T17:05:56+00:00", @@ -6069,7 +15056,7 @@ aws_managed_policies_data = """ "AmazonDMSVPCManagementRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonDMSVPCManagementRole", "AttachmentCount": 0, - "CreateDate": "2016-05-23T16:29:57+00:00", + "CreateDate": "2015-11-18T16:33:19+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -6093,6 +15080,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJHKIGMBQI4AEFFSYO", "PolicyName": "AmazonDMSVPCManagementRole", "UpdateDate": "2016-05-23T16:29:57+00:00", @@ -6130,16 +15118,394 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJPXIBTTZMBEFEX6UA", "PolicyName": "AmazonDRSVPCManagement", "UpdateDate": "2015-09-02T00:09:20+00:00", "VersionId": "v1" }, + "AmazonDocDBConsoleFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonDocDBConsoleFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-09T20:37:28+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "rds:AddRoleToDBCluster", + "rds:AddSourceIdentifierToSubscription", + "rds:AddTagsToResource", + "rds:ApplyPendingMaintenanceAction", + "rds:CopyDBClusterParameterGroup", + "rds:CopyDBClusterSnapshot", + "rds:CopyDBParameterGroup", + "rds:CreateDBCluster", + "rds:CreateDBClusterParameterGroup", + "rds:CreateDBClusterSnapshot", + "rds:CreateDBInstance", + "rds:CreateDBParameterGroup", + "rds:CreateDBSubnetGroup", + "rds:CreateEventSubscription", + "rds:DeleteDBCluster", + "rds:DeleteDBClusterParameterGroup", + "rds:DeleteDBClusterSnapshot", + "rds:DeleteDBInstance", + "rds:DeleteDBParameterGroup", + "rds:DeleteDBSubnetGroup", + "rds:DeleteEventSubscription", + "rds:DescribeAccountAttributes", + "rds:DescribeCertificates", + "rds:DescribeDBClusterParameterGroups", + "rds:DescribeDBClusterParameters", + "rds:DescribeDBClusterSnapshotAttributes", + 
"rds:DescribeDBClusterSnapshots", + "rds:DescribeDBClusters", + "rds:DescribeDBEngineVersions", + "rds:DescribeDBInstances", + "rds:DescribeDBLogFiles", + "rds:DescribeDBParameterGroups", + "rds:DescribeDBParameters", + "rds:DescribeDBSecurityGroups", + "rds:DescribeDBSubnetGroups", + "rds:DescribeEngineDefaultClusterParameters", + "rds:DescribeEngineDefaultParameters", + "rds:DescribeEventCategories", + "rds:DescribeEventSubscriptions", + "rds:DescribeEvents", + "rds:DescribeOptionGroups", + "rds:DescribeOrderableDBInstanceOptions", + "rds:DescribePendingMaintenanceActions", + "rds:DescribeValidDBInstanceModifications", + "rds:DownloadDBLogFilePortion", + "rds:FailoverDBCluster", + "rds:ListTagsForResource", + "rds:ModifyDBCluster", + "rds:ModifyDBClusterParameterGroup", + "rds:ModifyDBClusterSnapshotAttribute", + "rds:ModifyDBInstance", + "rds:ModifyDBParameterGroup", + "rds:ModifyDBSubnetGroup", + "rds:ModifyEventSubscription", + "rds:PromoteReadReplicaDBCluster", + "rds:RebootDBInstance", + "rds:RemoveRoleFromDBCluster", + "rds:RemoveSourceIdentifierFromSubscription", + "rds:RemoveTagsFromResource", + "rds:ResetDBClusterParameterGroup", + "rds:ResetDBParameterGroup", + "rds:RestoreDBClusterFromSnapshot", + "rds:RestoreDBClusterToPointInTime" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "iam:GetRole", + "cloudwatch:GetMetricData", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "ec2:AllocateAddress", + "ec2:AssignIpv6Addresses", + "ec2:AssignPrivateIpAddresses", + "ec2:AssociateAddress", + "ec2:AssociateRouteTable", + "ec2:AssociateSubnetCidrBlock", + "ec2:AssociateVpcCidrBlock", + "ec2:AttachInternetGateway", + "ec2:AttachNetworkInterface", + "ec2:CreateCustomerGateway", + "ec2:CreateDefaultSubnet", + "ec2:CreateDefaultVpc", + "ec2:CreateInternetGateway", + "ec2:CreateNatGateway", + "ec2:CreateNetworkInterface", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateVpc", + "ec2:CreateVpcEndpoint", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeCustomerGateways", + "ec2:DescribeInstances", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePrefixLists", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroupReferences", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcs", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVpcAttribute", + "ec2:ModifyVpcEndpoint", + "kms:DescribeKey", + "kms:ListAliases", + "kms:ListKeyPolicies", + "kms:ListKeys", + "kms:ListKeysForService", + "kms:ListRetirableGrants", + "logs:DescribeLogStreams", + "logs:GetLogEvents", + "sns:ListSubscriptions", + "sns:ListTopics", + "sns:Publish" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "rds.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/rds.amazonaws.com/AWSServiceRoleForRDS" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJHV6VMSNDDHJ3ESNI", + "PolicyName": "AmazonDocDBConsoleFullAccess", + "UpdateDate": "2019-01-09T20:37:28+00:00", + "VersionId": "v1" + }, + 
"AmazonDocDBFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonDocDBFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-09T20:21:44+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "rds:AddRoleToDBCluster", + "rds:AddSourceIdentifierToSubscription", + "rds:AddTagsToResource", + "rds:ApplyPendingMaintenanceAction", + "rds:CopyDBClusterParameterGroup", + "rds:CopyDBClusterSnapshot", + "rds:CopyDBParameterGroup", + "rds:CreateDBCluster", + "rds:CreateDBClusterParameterGroup", + "rds:CreateDBClusterSnapshot", + "rds:CreateDBInstance", + "rds:CreateDBParameterGroup", + "rds:CreateDBSubnetGroup", + "rds:CreateEventSubscription", + "rds:DeleteDBCluster", + "rds:DeleteDBClusterParameterGroup", + "rds:DeleteDBClusterSnapshot", + "rds:DeleteDBInstance", + "rds:DeleteDBParameterGroup", + "rds:DeleteDBSubnetGroup", + "rds:DeleteEventSubscription", + "rds:DescribeAccountAttributes", + "rds:DescribeCertificates", + "rds:DescribeDBClusterParameterGroups", + "rds:DescribeDBClusterParameters", + "rds:DescribeDBClusterSnapshotAttributes", + "rds:DescribeDBClusterSnapshots", + "rds:DescribeDBClusters", + "rds:DescribeDBEngineVersions", + "rds:DescribeDBInstances", + "rds:DescribeDBLogFiles", + "rds:DescribeDBParameterGroups", + "rds:DescribeDBParameters", + "rds:DescribeDBSecurityGroups", + "rds:DescribeDBSubnetGroups", + "rds:DescribeEngineDefaultClusterParameters", + "rds:DescribeEngineDefaultParameters", + "rds:DescribeEventCategories", + "rds:DescribeEventSubscriptions", + "rds:DescribeEvents", + "rds:DescribeOptionGroups", + "rds:DescribeOrderableDBInstanceOptions", + "rds:DescribePendingMaintenanceActions", + "rds:DescribeValidDBInstanceModifications", + "rds:DownloadDBLogFilePortion", + "rds:FailoverDBCluster", + "rds:ListTagsForResource", + "rds:ModifyDBCluster", + "rds:ModifyDBClusterParameterGroup", + "rds:ModifyDBClusterSnapshotAttribute", + "rds:ModifyDBInstance", + "rds:ModifyDBParameterGroup", + "rds:ModifyDBSubnetGroup", + "rds:ModifyEventSubscription", + "rds:PromoteReadReplicaDBCluster", + "rds:RebootDBInstance", + "rds:RemoveRoleFromDBCluster", + "rds:RemoveSourceIdentifierFromSubscription", + "rds:RemoveTagsFromResource", + "rds:ResetDBClusterParameterGroup", + "rds:ResetDBParameterGroup", + "rds:RestoreDBClusterFromSnapshot", + "rds:RestoreDBClusterToPointInTime" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "kms:ListAliases", + "kms:ListKeyPolicies", + "kms:ListKeys", + "kms:ListRetirableGrants", + "logs:DescribeLogStreams", + "logs:GetLogEvents", + "sns:ListSubscriptions", + "sns:ListTopics", + "sns:Publish" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "rds.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/rds.amazonaws.com/AWSServiceRoleForRDS" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIQKACUF6JJHALEG5K", + "PolicyName": "AmazonDocDBFullAccess", + "UpdateDate": "2019-01-09T20:21:44+00:00", + "VersionId": "v1" + }, + "AmazonDocDBReadOnlyAccess": { + "Arn": 
"arn:aws:iam::aws:policy/AmazonDocDBReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-09T20:30:28+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "rds:DescribeAccountAttributes", + "rds:DescribeCertificates", + "rds:DescribeDBClusterParameterGroups", + "rds:DescribeDBClusterParameters", + "rds:DescribeDBClusterSnapshotAttributes", + "rds:DescribeDBClusterSnapshots", + "rds:DescribeDBClusters", + "rds:DescribeDBEngineVersions", + "rds:DescribeDBInstances", + "rds:DescribeDBLogFiles", + "rds:DescribeDBParameterGroups", + "rds:DescribeDBParameters", + "rds:DescribeDBSubnetGroups", + "rds:DescribeEventCategories", + "rds:DescribeEventSubscriptions", + "rds:DescribeEvents", + "rds:DescribeOrderableDBInstanceOptions", + "rds:DescribePendingMaintenanceActions", + "rds:DownloadDBLogFilePortion", + "rds:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "kms:ListKeys", + "kms:ListRetirableGrants", + "kms:ListAliases", + "kms:ListKeyPolicies" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:DescribeLogStreams", + "logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/rds/*:log-stream:*", + "arn:aws:logs:*:*:log-group:/aws/docdb/*:log-stream:*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI477RMVACLTLWY5RQ", + "PolicyName": "AmazonDocDBReadOnlyAccess", + "UpdateDate": "2019-01-09T20:30:28+00:00", + "VersionId": "v1" + }, "AmazonDynamoDBFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-06-28T23:23:34+00:00", - "DefaultVersionId": "v5", + "CreateDate": "2015-02-06T18:40:11+00:00", + "DefaultVersionId": "v9", "Document": { "Statement": [ { @@ -6188,7 +15554,14 @@ aws_managed_policies_data = """ "lambda:CreateEventSourceMapping", "lambda:DeleteEventSourceMapping", "lambda:GetFunctionConfiguration", - "lambda:DeleteFunction" + "lambda:DeleteFunction", + "resource-groups:ListGroups", + "resource-groups:ListGroupResources", + "resource-groups:GetGroup", + "resource-groups:GetGroupQuery", + "resource-groups:DeleteGroup", + "resource-groups:CreateGroup", + "tag:GetResources" ], "Effect": "Allow", "Resource": "*" @@ -6207,6 +15580,22 @@ aws_managed_policies_data = """ }, "Effect": "Allow", "Resource": "*" + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringEquals": { + "iam:AWSServiceName": [ + "replication.dynamodb.amazonaws.com", + "dax.amazonaws.com", + "dynamodb.application-autoscaling.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -6214,15 +15603,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAINUGF2JSOSUY76KYA", "PolicyName": "AmazonDynamoDBFullAccess", - "UpdateDate": "2017-06-28T23:23:34+00:00", - "VersionId": "v5" + "UpdateDate": "2019-05-08T21:20:48+00:00", + 
"VersionId": "v9" }, "AmazonDynamoDBFullAccesswithDataPipeline": { "Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBFullAccesswithDataPipeline", "AttachmentCount": 0, - "CreateDate": "2015-11-12T02:17:42+00:00", + "CreateDate": "2015-02-06T18:40:14+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -6312,6 +15702,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ3ORT7KDISSXGHJXA", "PolicyName": "AmazonDynamoDBFullAccesswithDataPipeline", "UpdateDate": "2015-11-12T02:17:42+00:00", @@ -6320,8 +15711,8 @@ aws_managed_policies_data = """ "AmazonDynamoDBReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonDynamoDBReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-06-12T21:11:40+00:00", - "DefaultVersionId": "v5", + "CreateDate": "2015-02-06T18:40:12+00:00", + "DefaultVersionId": "v8", "Document": { "Statement": [ { @@ -6340,23 +15731,32 @@ aws_managed_policies_data = """ "datapipeline:ListPipelines", "datapipeline:QueryObjects", "dynamodb:BatchGetItem", - "dynamodb:DescribeTable", + "dynamodb:Describe*", + "dynamodb:List*", "dynamodb:GetItem", - "dynamodb:ListTables", "dynamodb:Query", "dynamodb:Scan", - "dynamodb:DescribeReservedCapacity", - "dynamodb:DescribeReservedCapacityOfferings", - "dynamodb:ListTagsOfResource", - "dynamodb:DescribeTimeToLive", - "dynamodb:DescribeLimits", + "dax:Describe*", + "dax:List*", + "dax:GetItem", + "dax:BatchGetItem", + "dax:Query", + "dax:Scan", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", "iam:GetRole", "iam:ListRoles", "sns:ListSubscriptionsByTopic", "sns:ListTopics", "lambda:ListFunctions", "lambda:ListEventSourceMappings", - "lambda:GetFunctionConfiguration" + "lambda:GetFunctionConfiguration", + "resource-groups:ListGroups", + "resource-groups:ListGroupResources", + "resource-groups:GetGroup", + "resource-groups:GetGroupQuery", + "tag:GetResources" ], "Effect": "Allow", "Resource": "*" @@ -6367,21 +15767,23 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIY2XFNA232XJ6J7X2", "PolicyName": "AmazonDynamoDBReadOnlyAccess", - "UpdateDate": "2017-06-12T21:11:40+00:00", - "VersionId": "v5" + "UpdateDate": "2019-05-08T21:15:48+00:00", + "VersionId": "v8" }, "AmazonEC2ContainerRegistryFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryFullAccess", "AttachmentCount": 0, "CreateDate": "2015-12-21T17:06:48+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ - "ecr:*" + "ecr:*", + "cloudtrail:LookupEvents" ], "Effect": "Allow", "Resource": "*" @@ -6392,15 +15794,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIESRL7KD7IIVF6V4W", "PolicyName": "AmazonEC2ContainerRegistryFullAccess", - "UpdateDate": "2015-12-21T17:06:48+00:00", - "VersionId": "v1" + "UpdateDate": "2017-11-10T17:54:49+00:00", + "VersionId": "v2" }, "AmazonEC2ContainerRegistryPowerUser": { "Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryPowerUser", "AttachmentCount": 0, - "CreateDate": "2016-10-11T22:28:07+00:00", + "CreateDate": "2015-12-21T17:05:33+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -6428,6 +15831,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + 
"PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJDNE5PIHROIBGGDDW", "PolicyName": "AmazonEC2ContainerRegistryPowerUser", "UpdateDate": "2016-10-11T22:28:07+00:00", @@ -6436,7 +15840,7 @@ aws_managed_policies_data = """ "AmazonEC2ContainerRegistryReadOnly": { "Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", "AttachmentCount": 0, - "CreateDate": "2016-10-11T22:08:43+00:00", + "CreateDate": "2015-12-21T17:04:15+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -6460,6 +15864,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIFYZPA37OOHVIH7KQ", "PolicyName": "AmazonEC2ContainerRegistryReadOnly", "UpdateDate": "2016-10-11T22:08:43+00:00", @@ -6467,9 +15872,9 @@ aws_managed_policies_data = """ }, "AmazonEC2ContainerServiceAutoscaleRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceAutoscaleRole", - "AttachmentCount": 1, + "AttachmentCount": 0, "CreateDate": "2016-05-12T23:25:44+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -6484,7 +15889,8 @@ aws_managed_policies_data = """ }, { "Action": [ - "cloudwatch:DescribeAlarms" + "cloudwatch:DescribeAlarms", + "cloudwatch:PutMetricAlarm" ], "Effect": "Allow", "Resource": [ @@ -6497,16 +15903,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIUAP3EGGGXXCPDQKK", "PolicyName": "AmazonEC2ContainerServiceAutoscaleRole", - "UpdateDate": "2016-05-12T23:25:44+00:00", - "VersionId": "v1" + "UpdateDate": "2018-02-05T19:15:15+00:00", + "VersionId": "v2" }, "AmazonEC2ContainerServiceEventsRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceEventsRole", "AttachmentCount": 0, "CreateDate": "2017-05-30T16:51:35+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -6517,6 +15924,18 @@ aws_managed_policies_data = """ "Resource": [ "*" ] + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:PassedToService": "ecs-tasks.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] } ], "Version": "2012-10-17" @@ -6524,15 +15943,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAITKFNIUAG27VSYNZ4", "PolicyName": "AmazonEC2ContainerServiceEventsRole", - "UpdateDate": "2017-05-30T16:51:35+00:00", - "VersionId": "v1" + "UpdateDate": "2018-05-22T19:13:11+00:00", + "VersionId": "v2" }, "AmazonEC2ContainerServiceFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonEC2ContainerServiceFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-06-08T00:18:56+00:00", + "CreateDate": "2015-04-24T16:54:35+00:00", "DefaultVersionId": "v4", "Document": { "Statement": [ @@ -6568,6 +15988,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJALOYVTPDZEMIACSM", "PolicyName": "AmazonEC2ContainerServiceFullAccess", "UpdateDate": "2017-06-08T00:18:56+00:00", @@ -6575,8 +15996,8 @@ aws_managed_policies_data = """ }, "AmazonEC2ContainerServiceRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceRole", - "AttachmentCount": 1, - "CreateDate": "2016-08-11T13:08:01+00:00", + "AttachmentCount": 0, + "CreateDate": 
"2015-04-09T16:14:19+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -6599,6 +16020,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJO53W2XHNACG7V77Q", "PolicyName": "AmazonEC2ContainerServiceRole", "UpdateDate": "2016-08-11T13:08:01+00:00", @@ -6606,8 +16028,8 @@ aws_managed_policies_data = """ }, "AmazonEC2ContainerServiceforEC2Role": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role", - "AttachmentCount": 1, - "CreateDate": "2017-05-17T23:09:13+00:00", + "AttachmentCount": 0, + "CreateDate": "2015-03-19T18:45:18+00:00", "DefaultVersionId": "v5", "Document": { "Statement": [ @@ -6637,6 +16059,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJLYJCVHC7TQHCSQDS", "PolicyName": "AmazonEC2ContainerServiceforEC2Role", "UpdateDate": "2017-05-17T23:09:13+00:00", @@ -6644,9 +16067,9 @@ aws_managed_policies_data = """ }, "AmazonEC2FullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonEC2FullAccess", - "AttachmentCount": 1, + "AttachmentCount": 0, "CreateDate": "2015-02-06T18:40:15+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v5", "Document": { "Statement": [ { @@ -6668,6 +16091,23 @@ aws_managed_policies_data = """ "Action": "autoscaling:*", "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": [ + "autoscaling.amazonaws.com", + "ec2scheduled.amazonaws.com", + "elasticloadbalancing.amazonaws.com", + "spot.amazonaws.com", + "spotfleet.amazonaws.com", + "transitgateway.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -6675,10 +16115,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI3VAJF5ZCRZ7MCQE6", "PolicyName": "AmazonEC2FullAccess", - "UpdateDate": "2015-02-06T18:40:15+00:00", - "VersionId": "v1" + "UpdateDate": "2018-11-27T02:16:56+00:00", + "VersionId": "v5" }, "AmazonEC2ReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess", @@ -6717,6 +16158,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIGDT4SV4GSETWTBZK", "PolicyName": "AmazonEC2ReadOnlyAccess", "UpdateDate": "2015-02-06T18:40:17+00:00", @@ -6740,6 +16182,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIU6NBZVF2PCRW36ZW", "PolicyName": "AmazonEC2ReportsAccess", "UpdateDate": "2015-02-06T18:40:16+00:00", @@ -6748,7 +16191,7 @@ aws_managed_policies_data = """ "AmazonEC2RoleforAWSCodeDeploy": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforAWSCodeDeploy", "AttachmentCount": 0, - "CreateDate": "2017-03-20T17:14:10+00:00", + "CreateDate": "2015-05-19T18:10:14+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -6767,6 +16210,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIAZKXZ27TAJ4PVWGK", "PolicyName": "AmazonEC2RoleforAWSCodeDeploy", "UpdateDate": "2017-03-20T17:14:10+00:00", @@ -6775,7 +16219,7 @@ aws_managed_policies_data = """ 
"AmazonEC2RoleforDataPipelineRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforDataPipelineRole", "AttachmentCount": 0, - "CreateDate": "2016-02-22T17:24:05+00:00", + "CreateDate": "2015-02-06T18:41:25+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -6808,6 +16252,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ3Z5I2WAJE5DN2J36", "PolicyName": "AmazonEC2RoleforDataPipelineRole", "UpdateDate": "2016-02-22T17:24:05+00:00", @@ -6816,8 +16261,8 @@ aws_managed_policies_data = """ "AmazonEC2RoleforSSM": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM", "AttachmentCount": 0, - "CreateDate": "2017-08-10T20:49:08+00:00", - "DefaultVersionId": "v4", + "CreateDate": "2015-05-29T17:48:35+00:00", + "DefaultVersionId": "v8", "Document": { "Statement": [ { @@ -6825,11 +16270,14 @@ aws_managed_policies_data = """ "ssm:DescribeAssociation", "ssm:GetDeployablePatchSnapshotForInstance", "ssm:GetDocument", + "ssm:DescribeDocument", + "ssm:GetManifest", "ssm:GetParameters", "ssm:ListAssociations", "ssm:ListInstanceAssociations", "ssm:PutInventory", "ssm:PutComplianceItems", + "ssm:PutConfigurePackageResult", "ssm:UpdateAssociationStatus", "ssm:UpdateInstanceAssociationStatus", "ssm:UpdateInstanceInformation" @@ -6837,6 +16285,16 @@ aws_managed_policies_data = """ "Effect": "Allow", "Resource": "*" }, + { + "Action": [ + "ssmmessages:CreateControlChannel", + "ssmmessages:CreateDataChannel", + "ssmmessages:OpenControlChannel", + "ssmmessages:OpenDataChannel" + ], + "Effect": "Allow", + "Resource": "*" + }, { "Action": [ "ec2messages:AcknowledgeMessage", @@ -6884,21 +16342,17 @@ aws_managed_policies_data = """ }, { "Action": [ + "s3:GetBucketLocation", "s3:PutObject", "s3:GetObject", + "s3:GetEncryptionConfiguration", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts", + "s3:ListBucket", "s3:ListBucketMultipartUploads" ], "Effect": "Allow", "Resource": "*" - }, - { - "Action": [ - "s3:ListBucket" - ], - "Effect": "Allow", - "Resource": "arn:aws:s3:::amazon-ssm-packages-*" } ], "Version": "2012-10-17" @@ -6906,16 +16360,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI6TL3SMY22S4KMMX6", "PolicyName": "AmazonEC2RoleforSSM", - "UpdateDate": "2017-08-10T20:49:08+00:00", - "VersionId": "v4" + "UpdateDate": "2019-01-24T19:20:51+00:00", + "VersionId": "v8" }, "AmazonEC2SpotFleetAutoscaleRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetAutoscaleRole", "AttachmentCount": 0, "CreateDate": "2016-08-19T18:27:22+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -6930,12 +16385,24 @@ aws_managed_policies_data = """ }, { "Action": [ - "cloudwatch:DescribeAlarms" + "cloudwatch:DescribeAlarms", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DeleteAlarms" ], "Effect": "Allow", "Resource": [ "*" ] + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "ec2.application-autoscaling.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/ec2.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_EC2SpotFleetRequest" } ], "Version": "2012-10-17" @@ -6943,16 +16410,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, 
"Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIMFFRMIOBGDP2TAVE", "PolicyName": "AmazonEC2SpotFleetAutoscaleRole", - "UpdateDate": "2016-08-19T18:27:22+00:00", - "VersionId": "v1" + "UpdateDate": "2019-02-18T19:17:03+00:00", + "VersionId": "v3" }, "AmazonEC2SpotFleetRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetRole", "AttachmentCount": 0, - "CreateDate": "2016-11-10T21:19:35+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2015-05-18T23:28:05+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -6968,6 +16436,24 @@ aws_managed_policies_data = """ "Resource": [ "*" ] + }, + { + "Action": [ + "elasticloadbalancing:RegisterInstancesWithLoadBalancer" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:loadbalancer/*" + ] + }, + { + "Action": [ + "elasticloadbalancing:RegisterTargets" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] } ], "Version": "2012-10-17" @@ -6975,16 +16461,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIMRTKHWK7ESSNETSW", "PolicyName": "AmazonEC2SpotFleetRole", - "UpdateDate": "2016-11-10T21:19:35+00:00", - "VersionId": "v3" + "UpdateDate": "2017-11-07T19:14:10+00:00", + "VersionId": "v4" }, "AmazonEC2SpotFleetTaggingRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonEC2SpotFleetTaggingRole", "AttachmentCount": 0, - "CreateDate": "2017-07-26T19:10:35+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2017-06-29T18:19:29+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -7005,13 +16492,34 @@ aws_managed_policies_data = """ "Action": "iam:PassRole", "Condition": { "StringEquals": { - "iam:PassedToService": "ec2.amazonaws.com" + "iam:PassedToService": [ + "ec2.amazonaws.com", + "ec2.amazonaws.com.cn" + ] } }, "Effect": "Allow", "Resource": [ "*" ] + }, + { + "Action": [ + "elasticloadbalancing:RegisterInstancesWithLoadBalancer" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:loadbalancer/*" + ] + }, + { + "Action": [ + "elasticloadbalancing:RegisterTargets" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] } ], "Version": "2012-10-17" @@ -7019,11 +16527,633 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ5U6UMLCEYLX5OLC4", "PolicyName": "AmazonEC2SpotFleetTaggingRole", - "UpdateDate": "2017-07-26T19:10:35+00:00", + "UpdateDate": "2017-11-17T22:51:17+00:00", + "VersionId": "v4" + }, + "AmazonECSServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonECSServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-10-14T01:18:58+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AttachNetworkInterface", + "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:DeleteNetworkInterface", + "ec2:DeleteNetworkInterfacePermission", + "ec2:Describe*", + "ec2:DetachNetworkInterface", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:Describe*", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "route53:ChangeResourceRecordSets", + "route53:CreateHealthCheck", + "route53:DeleteHealthCheck", + "route53:Get*", + "route53:List*", + "route53:UpdateHealthCheck", 
+ "servicediscovery:DeregisterInstance", + "servicediscovery:Get*", + "servicediscovery:List*", + "servicediscovery:RegisterInstance", + "servicediscovery:UpdateInstanceCustomHealthStatus" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "ECSTaskManagement" + }, + { + "Action": [ + "ec2:CreateTags" + ], + "Effect": "Allow", + "Resource": "arn:aws:ec2:*:*:network-interface/*", + "Sid": "ECSTagging" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIVUWKCAI7URU4WUEI", + "PolicyName": "AmazonECSServiceRolePolicy", + "UpdateDate": "2018-10-18T23:18:18+00:00", + "VersionId": "v5" + }, + "AmazonECSTaskExecutionRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-11-16T18:48:22+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ecr:GetAuthorizationToken", + "ecr:BatchCheckLayerAvailability", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJG4T4G4PV56DE72PY", + "PolicyName": "AmazonECSTaskExecutionRolePolicy", + "UpdateDate": "2017-11-16T18:48:22+00:00", + "VersionId": "v1" + }, + "AmazonECS_FullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonECS_FullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-07T21:36:54+00:00", + "DefaultVersionId": "v15", + "Document": { + "Statement": [ + { + "Action": [ + "application-autoscaling:DeleteScalingPolicy", + "application-autoscaling:DeregisterScalableTarget", + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:DescribeScalingActivities", + "application-autoscaling:DescribeScalingPolicies", + "application-autoscaling:PutScalingPolicy", + "application-autoscaling:RegisterScalableTarget", + "autoscaling:UpdateAutoScalingGroup", + "autoscaling:CreateAutoScalingGroup", + "autoscaling:CreateLaunchConfiguration", + "autoscaling:DeleteAutoScalingGroup", + "autoscaling:DeleteLaunchConfiguration", + "autoscaling:Describe*", + "cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:DescribeStack*", + "cloudformation:UpdateStack", + "cloudwatch:DescribeAlarms", + "cloudwatch:DeleteAlarms", + "cloudwatch:GetMetricStatistics", + "cloudwatch:PutMetricAlarm", + "codedeploy:CreateApplication", + "codedeploy:CreateDeployment", + "codedeploy:CreateDeploymentGroup", + "codedeploy:GetApplication", + "codedeploy:GetDeployment", + "codedeploy:GetDeploymentGroup", + "codedeploy:ListApplications", + "codedeploy:ListDeploymentGroups", + "codedeploy:ListDeployments", + "codedeploy:StopDeployment", + "codedeploy:GetDeploymentTarget", + "codedeploy:ListDeploymentTargets", + "codedeploy:GetDeploymentConfig", + "codedeploy:GetApplicationRevision", + "codedeploy:RegisterApplicationRevision", + "codedeploy:BatchGetApplicationRevisions", + "codedeploy:BatchGetDeploymentGroups", + "codedeploy:BatchGetDeployments", + "codedeploy:BatchGetApplications", + "codedeploy:ListApplicationRevisions", + "codedeploy:ListDeploymentConfigs", + "codedeploy:ContinueDeployment", + "sns:ListTopics", + "lambda:ListFunctions", + "ec2:AssociateRouteTable", + "ec2:AttachInternetGateway", + 
"ec2:AuthorizeSecurityGroupIngress", + "ec2:CancelSpotFleetRequests", + "ec2:CreateInternetGateway", + "ec2:CreateLaunchTemplate", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateVpc", + "ec2:DeleteLaunchTemplate", + "ec2:DeleteSubnet", + "ec2:DeleteVpc", + "ec2:Describe*", + "ec2:DetachInternetGateway", + "ec2:DisassociateRouteTable", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVpcAttribute", + "ec2:RunInstances", + "ec2:RequestSpotFleet", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateRule", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteRule", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:DescribeTargetGroups", + "ecs:*", + "events:DescribeRule", + "events:DeleteRule", + "events:ListRuleNamesByTarget", + "events:ListTargetsByRule", + "events:PutRule", + "events:PutTargets", + "events:RemoveTargets", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfiles", + "iam:ListRoles", + "logs:CreateLogGroup", + "logs:DescribeLogGroups", + "logs:FilterLogEvents", + "route53:GetHostedZone", + "route53:ListHostedZonesByName", + "route53:CreateHostedZone", + "route53:DeleteHostedZone", + "route53:GetHealthCheck", + "servicediscovery:CreatePrivateDnsNamespace", + "servicediscovery:CreateService", + "servicediscovery:GetNamespace", + "servicediscovery:GetOperation", + "servicediscovery:GetService", + "servicediscovery:ListNamespaces", + "servicediscovery:ListServices", + "servicediscovery:UpdateService", + "servicediscovery:DeleteService" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ssm:GetParametersByPath", + "ssm:GetParameters", + "ssm:GetParameter" + ], + "Effect": "Allow", + "Resource": "arn:aws:ssm:*:*:parameter/aws/service/ecs*" + }, + { + "Action": [ + "ec2:DeleteInternetGateway", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup" + ], + "Condition": { + "StringLike": { + "ec2:ResourceTag/aws:cloudformation:stack-name": "EC2ContainerService-*" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:PassedToService": "ecs-tasks.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "ec2.amazonaws.com", + "ec2.amazonaws.com.cn" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/ecsInstanceRole*" + ] + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "application-autoscaling.amazonaws.com", + "application-autoscaling.amazonaws.com.cn" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/ecsAutoscaleRole*" + ] + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": [ + "ecs.amazonaws.com", + "spot.amazonaws.com", + "spotfleet.amazonaws.com", + "ecs.application-autoscaling.amazonaws.com", + "autoscaling.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, 
+ "PolicyId": "ANPAJ7S7AN6YQPTJC7IFS", + "PolicyName": "AmazonECS_FullAccess", + "UpdateDate": "2019-02-04T18:44:48+00:00", + "VersionId": "v15" + }, + "AmazonEKSClusterPolicy": { + "Arn": "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", + "AttachmentCount": 0, + "CreateDate": "2018-05-27T21:06:14+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:UpdateAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "ec2:DescribeDhcpOptions", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "kms:DescribeKey" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIBTLDQMIC6UOIGFWA", + "PolicyName": "AmazonEKSClusterPolicy", + "UpdateDate": "2019-05-22T22:04:46+00:00", + "VersionId": "v3" + }, + "AmazonEKSServicePolicy": { + "Arn": "arn:aws:iam::aws:policy/AmazonEKSServicePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-05-27T21:08:21+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:DeleteNetworkInterface", + "ec2:DescribeInstances", + "ec2:DescribeNetworkInterfaces", + 
"ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:ModifyNetworkInterfaceAttribute", + "iam:ListAttachedRolePolicies" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:vpc/*", + "arn:aws:ec2:*:*:subnet/*" + ] + }, + { + "Action": "route53:AssociateVPCWithHostedZone", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "logs:CreateLogGroup", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:DescribeLogStreams" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:log-group:/aws/eks/*:*" + }, + { + "Action": "logs:PutLogEvents", + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:log-group:/aws/eks/*:*:*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJFCNXU6HPGCIVXYDI", + "PolicyName": "AmazonEKSServicePolicy", + "UpdateDate": "2019-02-26T21:01:48+00:00", + "VersionId": "v3" + }, + "AmazonEKSWorkerNodePolicy": { + "Arn": "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-05-27T21:09:01+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcs", + "eks:DescribeCluster" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIBVMOY52IPQ6HD3PO", + "PolicyName": "AmazonEKSWorkerNodePolicy", + "UpdateDate": "2018-05-27T21:09:01+00:00", + "VersionId": "v1" + }, + "AmazonEKS_CNI_Policy": { + "Arn": "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "AttachmentCount": 0, + "CreateDate": "2018-05-27T21:07:42+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AssignPrivateIpAddresses", + "ec2:AttachNetworkInterface", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DescribeInstances", + "ec2:DescribeNetworkInterfaces", + "ec2:DetachNetworkInterface", + "ec2:ModifyNetworkInterfaceAttribute" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateTags" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:network-interface/*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJWLAS474LDBXNNTM4", + "PolicyName": "AmazonEKS_CNI_Policy", + "UpdateDate": "2018-05-31T22:16:00+00:00", "VersionId": "v2" }, + "AmazonEMRCleanupPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonEMRCleanupPolicy", + "AttachmentCount": 0, + "CreateDate": "2017-09-26T23:54:19+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeSpotInstanceRequests", + "ec2:ModifyInstanceAttribute", + "ec2:TerminateInstances", + "ec2:CancelSpotInstanceRequests", + "ec2:DeleteNetworkInterface", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeVolumeStatus", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ec2:DeleteVolume" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + 
"IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI4YEZURRMKACW56EA", + "PolicyName": "AmazonEMRCleanupPolicy", + "UpdateDate": "2017-09-26T23:54:19+00:00", + "VersionId": "v1" + }, + "AmazonESCognitoAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonESCognitoAccess", + "AttachmentCount": 0, + "CreateDate": "2018-02-28T22:29:18+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cognito-idp:DescribeUserPool", + "cognito-idp:CreateUserPoolClient", + "cognito-idp:DeleteUserPoolClient", + "cognito-idp:DescribeUserPoolClient", + "cognito-idp:AdminInitiateAuth", + "cognito-idp:AdminUserGlobalSignOut", + "cognito-idp:ListUserPoolClients", + "cognito-identity:DescribeIdentityPool", + "cognito-identity:UpdateIdentityPool", + "cognito-identity:SetIdentityPoolRoles", + "cognito-identity:GetIdentityPoolRoles" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:PassedToService": "cognito-identity.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJL2FUMODIGNDPTZHO", + "PolicyName": "AmazonESCognitoAccess", + "UpdateDate": "2018-02-28T22:29:18+00:00", + "VersionId": "v1" + }, "AmazonESFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonESFullAccess", "AttachmentCount": 0, @@ -7044,6 +17174,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJM6ZTCU24QL5PZCGC", "PolicyName": "AmazonESFullAccess", "UpdateDate": "2015-10-01T19:14:00+00:00", @@ -7053,13 +17184,14 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AmazonESReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2015-10-01T19:18:24+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ "es:Describe*", - "es:List*" + "es:List*", + "es:Get*" ], "Effect": "Allow", "Resource": "*" @@ -7070,22 +17202,33 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJUDMRLOQ7FPAR46FQ", "PolicyName": "AmazonESReadOnlyAccess", - "UpdateDate": "2015-10-01T19:18:24+00:00", - "VersionId": "v1" + "UpdateDate": "2018-10-03T03:32:56+00:00", + "VersionId": "v2" }, "AmazonElastiCacheFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonElastiCacheFullAccess", "AttachmentCount": 0, "CreateDate": "2015-02-06T18:40:20+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": "elasticache:*", "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "elasticache.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/elasticache.amazonaws.com/AWSServiceRoleForElastiCache" } ], "Version": "2012-10-17" @@ -7093,10 +17236,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIA2V44CPHAUAAECKG", "PolicyName": "AmazonElastiCacheFullAccess", - "UpdateDate": "2015-02-06T18:40:20+00:00", - "VersionId": "v1" + "UpdateDate": "2017-12-07T17:48:26+00:00", + "VersionId": "v2" }, 
"AmazonElastiCacheReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonElastiCacheReadOnlyAccess", @@ -7118,6 +17262,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIPDACSNQHSENWAKM2", "PolicyName": "AmazonElastiCacheReadOnlyAccess", "UpdateDate": "2015-02-06T18:40:21+00:00", @@ -7126,7 +17271,7 @@ aws_managed_policies_data = """ "AmazonElasticFileSystemFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonElasticFileSystemFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-14T10:18:34+00:00", + "CreateDate": "2015-05-27T16:22:28+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -7155,6 +17300,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJKXTMNVQGIDNCKPBC", "PolicyName": "AmazonElasticFileSystemFullAccess", "UpdateDate": "2017-08-14T10:18:34+00:00", @@ -7163,7 +17309,7 @@ aws_managed_policies_data = """ "AmazonElasticFileSystemReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonElasticFileSystemReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-14T10:09:49+00:00", + "CreateDate": "2015-05-27T16:25:25+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -7188,16 +17334,71 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIPN5S4NE5JJOKVC4Y", "PolicyName": "AmazonElasticFileSystemReadOnlyAccess", "UpdateDate": "2017-08-14T10:09:49+00:00", "VersionId": "v3" }, + "AmazonElasticMapReduceEditorsRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceEditorsRole", + "AttachmentCount": 0, + "CreateDate": "2018-11-16T21:55:25+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateSecurityGroup", + "ec2:DescribeSecurityGroups", + "ec2:RevokeSecurityGroupEgress", + "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:DeleteNetworkInterface", + "ec2:DeleteNetworkInterfacePermission", + "ec2:DescribeNetworkInterfaces", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DescribeTags", + "ec2:DescribeInstances", + "ec2:DescribeSubnets", + "elasticmapreduce:ListInstances", + "elasticmapreduce:DescribeCluster" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "ForAllValues:StringEquals": { + "aws:TagKeys": [ + "aws:elasticmapreduce:editor-id", + "aws:elasticmapreduce:job-flow-id" + ] + } + }, + "Effect": "Allow", + "Resource": "arn:aws:ec2:*:*:network-interface/*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIBI5CIE6OHUIGLYVG", + "PolicyName": "AmazonElasticMapReduceEditorsRole", + "UpdateDate": "2018-11-16T21:55:25+00:00", + "VersionId": "v1" + }, "AmazonElasticMapReduceFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonElasticMapReduceFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-09-20T19:27:37+00:00", - "DefaultVersionId": "v5", + "CreateDate": "2015-02-06T18:40:22+00:00", + "DefaultVersionId": "v6", "Document": { "Statement": [ { @@ -7253,11 +17454,14 @@ aws_managed_policies_data = """ "Action": "iam:CreateServiceLinkedRole", "Condition": { "StringLike": { - "iam:AWSServiceName": 
"elasticmapreduce.amazonaws.com" + "iam:AWSServiceName": [ + "elasticmapreduce.amazonaws.com", + "elasticmapreduce.amazonaws.com.cn" + ] } }, "Effect": "Allow", - "Resource": "arn:aws:iam::*:role/aws-service-role/elasticmapreduce.amazonaws.com/AWSServiceRoleForEMRCleanup" + "Resource": "*" } ], "Version": "2012-10-17" @@ -7265,15 +17469,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIZP5JFP3AMSGINBB2", "PolicyName": "AmazonElasticMapReduceFullAccess", - "UpdateDate": "2017-09-20T19:27:37+00:00", - "VersionId": "v5" + "UpdateDate": "2018-01-23T19:40:00+00:00", + "VersionId": "v6" }, "AmazonElasticMapReduceReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonElasticMapReduceReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-05-22T23:00:19+00:00", + "CreateDate": "2015-02-06T18:40:23+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -7297,6 +17502,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIHP6NH2S6GYFCOINC", "PolicyName": "AmazonElasticMapReduceReadOnlyAccess", "UpdateDate": "2017-05-22T23:00:19+00:00", @@ -7305,8 +17511,8 @@ aws_managed_policies_data = """ "AmazonElasticMapReduceRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", "AttachmentCount": 0, - "CreateDate": "2017-07-17T21:29:50+00:00", - "DefaultVersionId": "v8", + "CreateDate": "2015-02-06T18:41:20+00:00", + "DefaultVersionId": "v9", "Document": { "Statement": [ { @@ -7377,6 +17583,16 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot*" } ], "Version": "2012-10-17" @@ -7384,10 +17600,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIDI2BQT2LKXZG36TW", "PolicyName": "AmazonElasticMapReduceRole", - "UpdateDate": "2017-07-17T21:29:50+00:00", - "VersionId": "v8" + "UpdateDate": "2017-12-12T00:47:45+00:00", + "VersionId": "v9" }, "AmazonElasticMapReduceforAutoScalingRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforAutoScalingRole", @@ -7411,6 +17628,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJSVXG6QHPE6VHDZ4Q", "PolicyName": "AmazonElasticMapReduceforAutoScalingRole", "UpdateDate": "2016-11-18T01:09:10+00:00", @@ -7419,7 +17637,7 @@ aws_managed_policies_data = """ "AmazonElasticMapReduceforEC2Role": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role", "AttachmentCount": 0, - "CreateDate": "2017-08-11T23:57:30+00:00", + "CreateDate": "2015-02-06T18:41:21+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -7481,108 +17699,12 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIGALS5RCDLZLB3PGS", "PolicyName": "AmazonElasticMapReduceforEC2Role", "UpdateDate": "2017-08-11T23:57:30+00:00", "VersionId": "v3" }, - "AmazonElasticTranscoderFullAccess": { - "Arn": 
"arn:aws:iam::aws:policy/AmazonElasticTranscoderFullAccess", - "AttachmentCount": 0, - "CreateDate": "2015-02-06T18:40:24+00:00", - "DefaultVersionId": "v1", - "Document": { - "Statement": [ - { - "Action": [ - "elastictranscoder:*", - "cloudfront:*", - "s3:List*", - "s3:Put*", - "s3:Get*", - "s3:*MultipartUpload*", - "iam:CreateRole", - "iam:GetRolePolicy", - "iam:PassRole", - "iam:PutRolePolicy", - "iam:List*", - "sns:CreateTopic", - "sns:List*" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - }, - "IsAttachable": true, - "IsDefaultVersion": true, - "Path": "/", - "PolicyId": "ANPAJ4D5OJU75P5ZJZVNY", - "PolicyName": "AmazonElasticTranscoderFullAccess", - "UpdateDate": "2015-02-06T18:40:24+00:00", - "VersionId": "v1" - }, - "AmazonElasticTranscoderJobsSubmitter": { - "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderJobsSubmitter", - "AttachmentCount": 0, - "CreateDate": "2015-02-06T18:40:25+00:00", - "DefaultVersionId": "v1", - "Document": { - "Statement": [ - { - "Action": [ - "elastictranscoder:Read*", - "elastictranscoder:List*", - "elastictranscoder:*Job", - "elastictranscoder:*Preset", - "s3:List*", - "iam:List*", - "sns:List*" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - }, - "IsAttachable": true, - "IsDefaultVersion": true, - "Path": "/", - "PolicyId": "ANPAIN5WGARIKZ3E2UQOU", - "PolicyName": "AmazonElasticTranscoderJobsSubmitter", - "UpdateDate": "2015-02-06T18:40:25+00:00", - "VersionId": "v1" - }, - "AmazonElasticTranscoderReadOnlyAccess": { - "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoderReadOnlyAccess", - "AttachmentCount": 0, - "CreateDate": "2015-02-06T18:40:26+00:00", - "DefaultVersionId": "v1", - "Document": { - "Statement": [ - { - "Action": [ - "elastictranscoder:Read*", - "elastictranscoder:List*", - "s3:List*", - "iam:List*", - "sns:List*" - ], - "Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - }, - "IsAttachable": true, - "IsDefaultVersion": true, - "Path": "/", - "PolicyId": "ANPAJGPP7GPMJRRJMEP3Q", - "PolicyName": "AmazonElasticTranscoderReadOnlyAccess", - "UpdateDate": "2015-02-06T18:40:26+00:00", - "VersionId": "v1" - }, "AmazonElasticTranscoderRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonElasticTranscoderRole", "AttachmentCount": 0, @@ -7633,16 +17755,128 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJNW3WMKVXFJ2KPIQ2", "PolicyName": "AmazonElasticTranscoderRole", "UpdateDate": "2015-02-06T18:41:26+00:00", "VersionId": "v1" }, + "AmazonElasticTranscoder_FullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoder_FullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-04-27T18:59:35+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "elastictranscoder:*", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:ListObjects", + "iam:ListRoles", + "sns:ListTopics" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": [ + "elastictranscoder.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAICFT6XVF3RSR4E7JG", + "PolicyName": "AmazonElasticTranscoder_FullAccess", + "UpdateDate": "2018-04-27T18:59:35+00:00", + 
"VersionId": "v1" + }, + "AmazonElasticTranscoder_JobsSubmitter": { + "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoder_JobsSubmitter", + "AttachmentCount": 0, + "CreateDate": "2018-06-07T21:12:16+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "elastictranscoder:Read*", + "elastictranscoder:List*", + "elastictranscoder:*Job", + "elastictranscoder:*Preset", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:ListObjects", + "iam:ListRoles", + "sns:ListTopics" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ7AUMMRQOVZRI734S", + "PolicyName": "AmazonElasticTranscoder_JobsSubmitter", + "UpdateDate": "2018-06-07T21:12:16+00:00", + "VersionId": "v1" + }, + "AmazonElasticTranscoder_ReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonElasticTranscoder_ReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-06-07T21:09:56+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "elastictranscoder:Read*", + "elastictranscoder:List*", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:ListObjects", + "iam:ListRoles", + "sns:ListTopics" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI3R3CR6KVEWD4DPFY", + "PolicyName": "AmazonElasticTranscoder_ReadOnlyAccess", + "UpdateDate": "2018-06-07T21:09:56+00:00", + "VersionId": "v1" + }, "AmazonElasticsearchServiceRolePolicy": { "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonElasticsearchServiceRolePolicy", "AttachmentCount": 0, "CreateDate": "2017-07-07T00:15:31+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -7652,7 +17886,8 @@ aws_managed_policies_data = """ "ec2:DescribeNetworkInterfaces", "ec2:ModifyNetworkInterfaceAttribute", "ec2:DescribeSecurityGroups", - "ec2:DescribeSubnets" + "ec2:DescribeSubnets", + "ec2:DescribeVpcs" ], "Effect": "Allow", "Resource": "*", @@ -7664,9 +17899,347 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJFEWZPHXKLCVHEUIC", "PolicyName": "AmazonElasticsearchServiceRolePolicy", - "UpdateDate": "2017-07-07T00:15:31+00:00", + "UpdateDate": "2018-02-08T21:38:27+00:00", + "VersionId": "v2" + }, + "AmazonFSxConsoleFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonFSxConsoleFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T16:36:05+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ds:DescribeDirectories", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "fsx:*", + "kms:ListAliases", + "s3:HeadBucket" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": [ + "fsx.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": [ + "s3.data-source.lustre.fsx.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + 
"Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAITDDJ23Y5UZ2WCZRQ", + "PolicyName": "AmazonFSxConsoleFullAccess", + "UpdateDate": "2018-11-28T16:36:05+00:00", + "VersionId": "v1" + }, + "AmazonFSxConsoleReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonFSxConsoleReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T16:35:24+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ds:DescribeDirectories", + "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "fsx:Describe*", + "fsx:ListTagsForResource", + "kms:DescribeKey" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJQUISIZNHGLA6YQFM", + "PolicyName": "AmazonFSxConsoleReadOnlyAccess", + "UpdateDate": "2018-11-28T16:35:24+00:00", + "VersionId": "v1" + }, + "AmazonFSxFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonFSxFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T16:34:43+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ds:DescribeDirectories", + "fsx:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": [ + "fsx.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": [ + "s3.data-source.lustre.fsx.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIEUV6Z2X4VNZRVB5I", + "PolicyName": "AmazonFSxFullAccess", + "UpdateDate": "2018-11-28T16:34:43+00:00", + "VersionId": "v1" + }, + "AmazonFSxReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonFSxReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T16:33:32+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "fsx:Describe*", + "fsx:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ4ICPKXR6KK32HT52", + "PolicyName": "AmazonFSxReadOnlyAccess", + "UpdateDate": "2018-11-28T16:33:32+00:00", + "VersionId": "v1" + }, + "AmazonFSxServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonFSxServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T10:38:37+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:PutMetricData", + "ds:AuthorizeApplication", + "ds:UnauthorizeApplication", + "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:DeleteNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "route53:AssociateVPCWithHostedZone" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIVQ24YKVRBV5IYQ5G", + "PolicyName": 
"AmazonFSxServiceRolePolicy", + "UpdateDate": "2018-11-28T10:38:37+00:00", + "VersionId": "v1" + }, + "AmazonForecastFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonForecastFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-18T01:52:29+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "forecast:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": "forecast.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIAKOTFNTUECQVU7C4", + "PolicyName": "AmazonForecastFullAccess", + "UpdateDate": "2019-01-18T01:52:29+00:00", + "VersionId": "v1" + }, + "AmazonFreeRTOSFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonFreeRTOSFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T15:32:51+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "freertos:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJAN6PSDCOH6HXG2SE", + "PolicyName": "AmazonFreeRTOSFullAccess", + "UpdateDate": "2017-11-29T15:32:51+00:00", + "VersionId": "v1" + }, + "AmazonFreeRTOSOTAUpdate": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonFreeRTOSOTAUpdate", + "AttachmentCount": 0, + "CreateDate": "2018-08-27T22:43:07+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "s3:GetObjectVersion", + "s3:PutObject", + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::afr-ota*" + }, + { + "Action": [ + "signer:StartSigningJob", + "signer:DescribeSigningJob", + "signer:GetSigningProfile", + "signer:PutSigningProfile" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:ListBucket", + "s3:ListAllMyBuckets", + "s3:GetBucketLocation" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iot:DeleteJob" + ], + "Effect": "Allow", + "Resource": "arn:aws:iot:*:*:job/AFR_OTA*" + }, + { + "Action": [ + "iot:DeleteStream" + ], + "Effect": "Allow", + "Resource": "arn:aws:iot:*:*:stream/AFR_OTA*" + }, + { + "Action": [ + "iot:CreateStream", + "iot:CreateJob" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAINC2TXHAYDOK3SWMU", + "PolicyName": "AmazonFreeRTOSOTAUpdate", + "UpdateDate": "2018-08-27T22:43:07+00:00", "VersionId": "v1" }, "AmazonGlacierFullAccess": { @@ -7687,6 +18260,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJQSTZJWB2AXXAKHVQ", "PolicyName": "AmazonGlacierFullAccess", "UpdateDate": "2015-02-06T18:40:28+00:00", @@ -7695,7 +18269,7 @@ aws_managed_policies_data = """ "AmazonGlacierReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonGlacierReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2016-05-05T18:46:10+00:00", + "CreateDate": "2015-02-06T18:40:27+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -7723,16 +18297,105 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": 
"/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI2D5NJKMU274MET4E", "PolicyName": "AmazonGlacierReadOnlyAccess", "UpdateDate": "2016-05-05T18:46:10+00:00", "VersionId": "v2" }, + "AmazonGuardDutyFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonGuardDutyFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-28T22:31:30+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "guardduty:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "guardduty.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIKUTKSN4KC63VDQUM", + "PolicyName": "AmazonGuardDutyFullAccess", + "UpdateDate": "2017-11-28T22:31:30+00:00", + "VersionId": "v1" + }, + "AmazonGuardDutyReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonGuardDutyReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-28T22:29:40+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "guardduty:Get*", + "guardduty:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIVMCEDV336RWUSNHG", + "PolicyName": "AmazonGuardDutyReadOnlyAccess", + "UpdateDate": "2018-04-25T21:07:17+00:00", + "VersionId": "v2" + }, + "AmazonGuardDutyServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonGuardDutyServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-11-28T20:12:59+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeImages" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIHZREZOWNSSA6FWQO", + "PolicyName": "AmazonGuardDutyServiceRolePolicy", + "UpdateDate": "2017-11-28T20:12:59+00:00", + "VersionId": "v1" + }, "AmazonInspectorFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonInspectorFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-09-12T17:42:57+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2015-10-07T17:08:04+00:00", + "DefaultVersionId": "v5", "Document": { "Statement": [ { @@ -7746,6 +18409,30 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": [ + "inspector.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "inspector.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/inspector.amazonaws.com/AWSServiceRoleForAmazonInspector" } ], "Version": "2012-10-17" @@ -7753,15 +18440,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI7Y6NTA27NWNA5U5E", "PolicyName": "AmazonInspectorFullAccess", - "UpdateDate": "2017-09-12T17:42:57+00:00", - "VersionId": "v3" + "UpdateDate": "2017-12-21T14:53:31+00:00", + "VersionId": "v5" }, 
"AmazonInspectorReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonInspectorReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-09-12T16:53:06+00:00", + "CreateDate": "2015-10-07T17:08:01+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -7787,11 +18475,69 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJXQNTHTEJ2JFRN2SE", "PolicyName": "AmazonInspectorReadOnlyAccess", "UpdateDate": "2017-09-12T16:53:06+00:00", "VersionId": "v3" }, + "AmazonInspectorServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonInspectorServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-11-21T15:48:27+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "directconnect:DescribeConnections", + "directconnect:DescribeDirectConnectGateways", + "directconnect:DescribeDirectConnectGatewayAssociations", + "directconnect:DescribeDirectConnectGatewayAttachments", + "directconnect:DescribeVirtualGateways", + "directconnect:DescribeVirtualInterfaces", + "directconnect:DescribeTags", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeCustomerGateways", + "ec2:DescribeInstances", + "ec2:DescribeTags", + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePrefixLists", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcPeeringConnections", + "ec2:DescribeVpcs", + "ec2:DescribeVpnConnections", + "ec2:DescribeVpnGateways", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJKBMSBWLU2TGXHHUQ", + "PolicyName": "AmazonInspectorServiceRolePolicy", + "UpdateDate": "2018-05-10T18:36:01+00:00", + "VersionId": "v4" + }, "AmazonKinesisAnalyticsFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonKinesisAnalyticsFullAccess", "AttachmentCount": 0, @@ -7856,6 +18602,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJQOSKHTXP43R7P5AC", "PolicyName": "AmazonKinesisAnalyticsFullAccess", "UpdateDate": "2016-09-21T19:01:14+00:00", @@ -7920,6 +18667,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIJIEXZAFUK43U7ARK", "PolicyName": "AmazonKinesisAnalyticsReadOnly", "UpdateDate": "2016-09-21T18:16:43+00:00", @@ -7945,6 +18693,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJMZQMTZ7FRBFHHAHI", "PolicyName": "AmazonKinesisFirehoseFullAccess", "UpdateDate": "2015-10-07T18:45:26+00:00", @@ -7971,6 +18720,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": 
"ANPAJ36NT645INW4K24W6", "PolicyName": "AmazonKinesisFirehoseReadOnlyAccess", "UpdateDate": "2015-10-07T18:43:39+00:00", @@ -7994,6 +18744,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIVF32HAMOXCUYRAYE", "PolicyName": "AmazonKinesisFullAccess", "UpdateDate": "2015-02-06T18:40:29+00:00", @@ -8021,16 +18772,69 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIOCMTDT5RLKZ2CAJO", "PolicyName": "AmazonKinesisReadOnlyAccess", "UpdateDate": "2015-02-06T18:40:30+00:00", "VersionId": "v1" }, + "AmazonKinesisVideoStreamsFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonKinesisVideoStreamsFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-12-01T23:27:18+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "kinesisvideo:*", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIZAN5AK7E7UVYIAZY", + "PolicyName": "AmazonKinesisVideoStreamsFullAccess", + "UpdateDate": "2017-12-01T23:27:18+00:00", + "VersionId": "v1" + }, + "AmazonKinesisVideoStreamsReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonKinesisVideoStreamsReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-12-01T23:14:32+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "kinesisvideo:Describe*", + "kinesisvideo:Get*", + "kinesisvideo:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJDS2DKUCYTEA7M6UA", + "PolicyName": "AmazonKinesisVideoStreamsReadOnlyAccess", + "UpdateDate": "2017-12-01T23:14:32+00:00", + "VersionId": "v1" + }, "AmazonLexFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonLexFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-04-14T19:45:37+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2017-04-11T23:20:36+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -8089,6 +18893,16 @@ aws_managed_policies_data = """ "arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots" ] }, + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/lex.amazonaws.com/AWSServiceRoleForLexBots" + ] + }, { "Action": [ "iam:DetachRolePolicy" @@ -8117,6 +18931,16 @@ aws_managed_policies_data = """ "arn:aws:iam::*:role/aws-service-role/channels.lex.amazonaws.com/AWSServiceRoleForLexChannels" ] }, + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/channels.lex.amazonaws.com/AWSServiceRoleForLexChannels" + ] + }, { "Action": [ "iam:DetachRolePolicy" @@ -8137,10 +18961,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJVLXDHKVC23HRTKSI", "PolicyName": "AmazonLexFullAccess", - "UpdateDate": "2017-04-14T19:45:37+00:00", - "VersionId": "v3" + "UpdateDate": "2017-11-15T23:55:07+00:00", + "VersionId": "v4" }, "AmazonLexReadOnly": { "Arn": 
"arn:aws:iam::aws:policy/AmazonLexReadOnly", @@ -8178,6 +19003,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJGBI5LSMAJNDGBNAM", "PolicyName": "AmazonLexReadOnly", "UpdateDate": "2017-04-11T23:13:33+00:00", @@ -8204,11 +19030,253 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJVZGB5CM3N6YWJHBE", "PolicyName": "AmazonLexRunBotsOnly", "UpdateDate": "2017-04-11T23:06:24+00:00", "VersionId": "v1" }, + "AmazonMQApiFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMQApiFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-12-18T20:31:31+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "mq:*", + "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:DeleteNetworkInterface", + "ec2:DeleteNetworkInterfacePermission", + "ec2:DetachNetworkInterface", + "ec2:DescribeInternetGateways", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeNetworkInterfacePermissions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:CreateLogGroup" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/amazonmq/*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI4CMO533EBV3L2GW4", + "PolicyName": "AmazonMQApiFullAccess", + "UpdateDate": "2018-12-18T20:31:31+00:00", + "VersionId": "v1" + }, + "AmazonMQApiReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMQApiReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-12-18T20:31:13+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "mq:Describe*", + "mq:List*", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIKI5JRHKAFHXQJKMO", + "PolicyName": "AmazonMQApiReadOnlyAccess", + "UpdateDate": "2018-12-18T20:31:13+00:00", + "VersionId": "v1" + }, + "AmazonMQFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMQFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-28T15:28:29+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "mq:*", + "cloudformation:CreateStack", + "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:DeleteNetworkInterface", + "ec2:DeleteNetworkInterfacePermission", + "ec2:DetachNetworkInterface", + "ec2:DescribeInternetGateways", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeNetworkInterfacePermissions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:CreateSecurityGroup", + "ec2:AuthorizeSecurityGroupIngress" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:CreateLogGroup" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/amazonmq/*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": 
"ANPAJLKBROJNQYDDXOOGG", + "PolicyName": "AmazonMQFullAccess", + "UpdateDate": "2018-12-18T20:33:17+00:00", + "VersionId": "v4" + }, + "AmazonMQReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMQReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-28T15:30:32+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "mq:Describe*", + "mq:List*", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJFH3NKGULDUU66D5C", + "PolicyName": "AmazonMQReadOnlyAccess", + "UpdateDate": "2017-11-28T19:02:03+00:00", + "VersionId": "v2" + }, + "AmazonMSKFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMSKFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-14T22:07:52+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "kafka:*", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeSecurityGroups", + "kms:DescribeKey", + "kms:CreateGrant" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "kafka.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/kafka.amazonaws.com/AWSServiceRoleForKafka*" + }, + { + "Action": [ + "iam:AttachRolePolicy", + "iam:PutRolePolicy" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/kafka.amazonaws.com/AWSServiceRoleForKafka*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJERQQQTWI5OMENTQE", + "PolicyName": "AmazonMSKFullAccess", + "UpdateDate": "2019-01-14T22:07:52+00:00", + "VersionId": "v1" + }, + "AmazonMSKReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMSKReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-14T22:28:45+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "kafka:Describe*", + "kafka:List*", + "kafka:Get*", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "kms:DescribeKey" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJGMUI3DP2EVP3VGYO", + "PolicyName": "AmazonMSKReadOnlyAccess", + "UpdateDate": "2019-01-14T22:28:45+00:00", + "VersionId": "v1" + }, "AmazonMachineLearningBatchPredictionsAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningBatchPredictionsAccess", "AttachmentCount": 0, @@ -8233,6 +19301,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAILOI4HTQSFTF3GQSC", "PolicyName": "AmazonMachineLearningBatchPredictionsAccess", "UpdateDate": "2015-04-09T17:12:19+00:00", @@ -8241,7 +19310,7 @@ aws_managed_policies_data = """ "AmazonMachineLearningCreateOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonMachineLearningCreateOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2016-06-29T20:55:03+00:00", + "CreateDate": "2015-04-09T17:18:09+00:00", "DefaultVersionId": "v2", "Document": { 
"Statement": [ @@ -8262,6 +19331,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJDRUNIC2RYAMAT3CK", "PolicyName": "AmazonMachineLearningCreateOnlyAccess", "UpdateDate": "2016-06-29T20:55:03+00:00", @@ -8287,6 +19357,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIWKW6AGSGYOQ5ERHC", "PolicyName": "AmazonMachineLearningFullAccess", "UpdateDate": "2015-04-09T17:25:41+00:00", @@ -8313,6 +19384,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJJL3PC3VCSVZP6OCI", "PolicyName": "AmazonMachineLearningManageRealTimeEndpointOnlyAccess", "UpdateDate": "2015-04-09T17:32:41+00:00", @@ -8339,6 +19411,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIW5VYBCGEX56JCINC", "PolicyName": "AmazonMachineLearningReadOnlyAccess", "UpdateDate": "2015-04-09T17:40:02+00:00", @@ -8364,6 +19437,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIWMCNQPRWMWT36GVQ", "PolicyName": "AmazonMachineLearningRealTimePredictionOnlyAccess", "UpdateDate": "2015-04-09T17:44:06+00:00", @@ -8404,6 +19478,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIQ5UDYYMNN42BM4AK", "PolicyName": "AmazonMachineLearningRoleforRedshiftDataSource", "UpdateDate": "2015-04-09T17:05:26+00:00", @@ -8413,7 +19488,7 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AmazonMacieFullAccess", "AttachmentCount": 0, "CreateDate": "2017-08-14T14:54:30+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -8422,6 +19497,16 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "macie.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -8429,9 +19514,39 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJJF2N5FR6S5TZN5OA", "PolicyName": "AmazonMacieFullAccess", - "UpdateDate": "2017-08-14T14:54:30+00:00", + "UpdateDate": "2018-06-28T15:54:57+00:00", + "VersionId": "v2" + }, + "AmazonMacieHandshakeRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonMacieHandshakeRole", + "AttachmentCount": 0, + "CreateDate": "2018-06-28T15:46:10+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "ForAnyValue:StringEquals": { + "iam:AWSServiceName": "macie.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ7CVEIVL347MLOVKI", + "PolicyName": "AmazonMacieHandshakeRole", + "UpdateDate": "2018-06-28T15:46:10+00:00", "VersionId": "v1" }, "AmazonMacieServiceRole": { @@ -8455,11 +19570,77 @@ aws_managed_policies_data = """ "IsAttachable": true, 
"IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJVV7PON3FPBL2PSGC", "PolicyName": "AmazonMacieServiceRole", "UpdateDate": "2017-08-14T14:53:26+00:00", "VersionId": "v1" }, + "AmazonMacieServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonMacieServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-06-19T22:17:38+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudtrail:DescribeTrails", + "cloudtrail:GetEventSelectors", + "cloudtrail:GetTrailStatus", + "cloudtrail:ListTags", + "cloudtrail:LookupEvents", + "iam:ListAccountAliases", + "s3:Get*", + "s3:List*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudtrail:CreateTrail", + "cloudtrail:StartLogging", + "cloudtrail:StopLogging", + "cloudtrail:UpdateTrail", + "cloudtrail:DeleteTrail", + "cloudtrail:PutEventSelectors" + ], + "Effect": "Allow", + "Resource": "arn:aws:cloudtrail:*:*:trail/AWSMacieTrail-DO-NOT-EDIT" + }, + { + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:DeleteBucketPolicy", + "s3:DeleteBucketWebsite", + "s3:DeleteObject", + "s3:DeleteObjectTagging", + "s3:DeleteObjectVersion", + "s3:DeleteObjectVersionTagging", + "s3:DeleteReplicationConfiguration", + "s3:PutBucketPolicy" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::awsmacie-*", + "arn:aws:s3:::awsmacietrail-*", + "arn:aws:s3:::*-awsmacietrail-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJPLHONRH2HP2H6TNQ", + "PolicyName": "AmazonMacieServiceRolePolicy", + "UpdateDate": "2018-06-19T22:17:38+00:00", + "VersionId": "v1" + }, "AmazonMacieSetupRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonMacieSetupRole", "AttachmentCount": 0, @@ -8520,11 +19701,172 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ5DC6UBVKND7ADSKA", "PolicyName": "AmazonMacieSetupRole", "UpdateDate": "2017-08-14T14:53:34+00:00", "VersionId": "v1" }, + "AmazonManagedBlockchainConsoleFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonManagedBlockchainConsoleFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-04-29T21:23:25+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "managedblockchain:*", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:CreateVpcEndpoint", + "kms:ListAliases", + "kms:DescribeKey" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4ONVQBFILL", + "PolicyName": "AmazonManagedBlockchainConsoleFullAccess", + "UpdateDate": "2019-04-29T21:23:25+00:00", + "VersionId": "v1" + }, + "AmazonManagedBlockchainFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonManagedBlockchainFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-04-29T21:39:29+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "managedblockchain:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": 
"ANPAZKAPJZG4CGBOJKRYD", + "PolicyName": "AmazonManagedBlockchainFullAccess", + "UpdateDate": "2019-04-29T21:39:29+00:00", + "VersionId": "v1" + }, + "AmazonManagedBlockchainReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonManagedBlockchainReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2019-04-30T18:17:31+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "managedblockchain:Get*", + "managedblockchain:List*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4OIIAURVWV", + "PolicyName": "AmazonManagedBlockchainReadOnlyAccess", + "UpdateDate": "2019-04-30T18:17:31+00:00", + "VersionId": "v1" + }, + "AmazonMechanicalTurkCrowdFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMechanicalTurkCrowdFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-10-05T18:07:21+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "crowd:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "CrowdApiFullAccess" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": "crowd.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIPM7C67S54NPAHQ4Q", + "PolicyName": "AmazonMechanicalTurkCrowdFullAccess", + "UpdateDate": "2018-09-28T21:08:53+00:00", + "VersionId": "v2" + }, + "AmazonMechanicalTurkCrowdReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonMechanicalTurkCrowdReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2017-10-05T18:10:56+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "crowd:GetTask" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "CrowdApiReadOnlyAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAID5UNRAAANDGAW4CY", + "PolicyName": "AmazonMechanicalTurkCrowdReadOnlyAccess", + "UpdateDate": "2017-10-05T18:10:56+00:00", + "VersionId": "v1" + }, "AmazonMechanicalTurkFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonMechanicalTurkFullAccess", "AttachmentCount": 0, @@ -8547,6 +19889,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJDGCL5BET73H5QIQC", "PolicyName": "AmazonMechanicalTurkFullAccess", "UpdateDate": "2015-12-11T19:08:19+00:00", @@ -8555,7 +19898,7 @@ aws_managed_policies_data = """ "AmazonMechanicalTurkReadOnly": { "Arn": "arn:aws:iam::aws:policy/AmazonMechanicalTurkReadOnly", "AttachmentCount": 0, - "CreateDate": "2017-02-27T21:45:50+00:00", + "CreateDate": "2015-12-11T19:08:28+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -8576,6 +19919,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIO5IY3G3WXSX5PPRM", "PolicyName": "AmazonMechanicalTurkReadOnly", "UpdateDate": "2017-02-27T21:45:50+00:00", @@ -8602,6 +19946,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": 
"ANPAJKJHO2R27TXKCWBU4", "PolicyName": "AmazonMobileAnalyticsFinancialReportAccess", "UpdateDate": "2015-02-06T18:40:35+00:00", @@ -8625,6 +19970,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIJIKLU2IJ7WJ6DZFG", "PolicyName": "AmazonMobileAnalyticsFullAccess", "UpdateDate": "2015-02-06T18:40:34+00:00", @@ -8648,6 +19994,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIQLKQ4RXPUBBVVRDE", "PolicyName": "AmazonMobileAnalyticsNon-financialReportAccess", "UpdateDate": "2015-02-06T18:40:36+00:00", @@ -8671,11 +20018,71 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ5TAWBBQC2FAL3G6G", "PolicyName": "AmazonMobileAnalyticsWriteOnlyAccess", "UpdateDate": "2015-02-06T18:40:37+00:00", "VersionId": "v1" }, + "AmazonPersonalizeFullAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonPersonalizeFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-12-04T22:24:33+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "personalize:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:PutMetricData", + "cloudwatch:ListMetrics" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject", + "s3:ListBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*Personalize*", + "arn:aws:s3:::*personalize*" + ] + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": "personalize.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ45XBPPZNI3MMVAUK", + "PolicyName": "AmazonPersonalizeFullAccess", + "UpdateDate": "2019-05-30T23:46:59+00:00", + "VersionId": "v2" + }, "AmazonPollyFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonPollyFullAccess", "AttachmentCount": 0, @@ -8698,6 +20105,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJUZOYQU6XQYPR7EWS", "PolicyName": "AmazonPollyFullAccess", "UpdateDate": "2016-11-30T18:59:06+00:00", @@ -8707,14 +20115,16 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AmazonPollyReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2016-11-30T18:59:24+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ "polly:DescribeVoices", "polly:GetLexicon", + "polly:GetSpeechSynthesisTask", "polly:ListLexicons", + "polly:ListSpeechSynthesisTasks", "polly:SynthesizeSpeech" ], "Effect": "Allow", @@ -8728,23 +20138,153 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ5FENL3CVPL2FPDLA", "PolicyName": "AmazonPollyReadOnlyAccess", - "UpdateDate": "2016-11-30T18:59:24+00:00", - "VersionId": "v1" + "UpdateDate": "2018-07-17T16:41:07+00:00", + "VersionId": "v2" + }, + "AmazonRDSBetaServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonRDSBetaServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": 
"2018-05-02T19:41:04+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSecurityGroup", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifyVpcEndpoint", + "ec2:RevokeSecurityGroupIngress", + "ec2:CreateVpcEndpoint", + "ec2:DescribeVpcEndpoints", + "ec2:DeleteVpcEndpoints" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "sns:Publish" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:CreateLogGroup" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/rds/*" + ] + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/rds/*:log-stream:*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ36CJAE6OYAR4YEK4", + "PolicyName": "AmazonRDSBetaServiceRolePolicy", + "UpdateDate": "2018-07-05T18:29:48+00:00", + "VersionId": "v3" + }, + "AmazonRDSDataFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRDSDataFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-20T21:29:36+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "secretsmanager:GetSecretValue", + "secretsmanager:PutResourcePolicy", + "secretsmanager:PutSecretValue", + "secretsmanager:DeleteSecret", + "secretsmanager:DescribeSecret", + "secretsmanager:TagResource" + ], + "Effect": "Allow", + "Resource": "arn:aws:secretsmanager:*:*:secret:rds-db-credentials/*", + "Sid": "SecretsManagerDbCredentialsAccess" + }, + { + "Action": [ + "dbqms:CreateFavoriteQuery", + "dbqms:DescribeFavoriteQueries", + "dbqms:UpdateFavoriteQuery", + "dbqms:DeleteFavoriteQueries", + "dbqms:GetQueryString", + "dbqms:CreateQueryHistory", + "dbqms:DescribeQueryHistory", + "dbqms:UpdateQueryHistory", + "dbqms:DeleteQueryHistory", + "dbqms:DescribeQueryHistory", + "rds-data:ExecuteSql", + "rds-data:ExecuteStatement", + "rds-data:BatchExecuteStatement", + "rds-data:BeginTransaction", + "rds-data:CommitTransaction", + "rds-data:RollbackTransaction", + "secretsmanager:CreateSecret", + "secretsmanager:ListSecrets", + "secretsmanager:GetRandomPassword", + "tag:GetResources" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "RDSDataServiceAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ5HUMNZCSW4IC74T6", + "PolicyName": "AmazonRDSDataFullAccess", + "UpdateDate": "2019-05-30T17:11:26+00:00", + "VersionId": "v2" }, "AmazonRDSDirectoryServiceAccess": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonRDSDirectoryServiceAccess", "AttachmentCount": 0, "CreateDate": "2016-02-26T02:02:05+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ "ds:DescribeDirectories", "ds:AuthorizeApplication", - "ds:UnauthorizeApplication" + "ds:UnauthorizeApplication", + "ds:GetAuthorizedApplicationDetails" ], "Effect": "Allow", "Resource": "*" @@ -8755,14 +20295,15 @@ aws_managed_policies_data = """ 
"IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIL4KBY57XWMYUHKUU", "PolicyName": "AmazonRDSDirectoryServiceAccess", - "UpdateDate": "2016-02-26T02:02:05+00:00", - "VersionId": "v1" + "UpdateDate": "2019-05-15T16:51:50+00:00", + "VersionId": "v2" }, "AmazonRDSEnhancedMonitoringRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole", - "AttachmentCount": 1, + "AttachmentCount": 0, "CreateDate": "2015-11-11T19:58:29+00:00", "DefaultVersionId": "v1", "Document": { @@ -8797,6 +20338,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJV7BS425S4PTSSVGK", "PolicyName": "AmazonRDSEnhancedMonitoringRole", "UpdateDate": "2015-11-11T19:58:29+00:00", @@ -8805,15 +20347,24 @@ aws_managed_policies_data = """ "AmazonRDSFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonRDSFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-09-14T23:40:45+00:00", - "DefaultVersionId": "v4", + "CreateDate": "2015-02-06T18:40:52+00:00", + "DefaultVersionId": "v6", "Document": { "Statement": [ { "Action": [ "rds:*", + "application-autoscaling:DeleteScalingPolicy", + "application-autoscaling:DeregisterScalableTarget", + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:DescribeScalingActivities", + "application-autoscaling:DescribeScalingPolicies", + "application-autoscaling:PutScalingPolicy", + "application-autoscaling:RegisterScalableTarget", "cloudwatch:DescribeAlarms", "cloudwatch:GetMetricStatistics", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DeleteAlarms", "ec2:DescribeAccountAttributes", "ec2:DescribeAvailabilityZones", "ec2:DescribeInternetGateways", @@ -8823,6 +20374,7 @@ aws_managed_policies_data = """ "ec2:DescribeVpcs", "sns:ListSubscriptions", "sns:ListTopics", + "sns:Publish", "logs:DescribeLogStreams", "logs:GetLogEvents" ], @@ -8833,6 +20385,19 @@ aws_managed_policies_data = """ "Action": "pi:*", "Effect": "Allow", "Resource": "arn:aws:pi:*:*:metrics/rds/*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": [ + "rds.amazonaws.com", + "rds.application-autoscaling.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -8840,15 +20405,81 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI3R4QMOG6Q5A4VWVG", "PolicyName": "AmazonRDSFullAccess", - "UpdateDate": "2017-09-14T23:40:45+00:00", - "VersionId": "v4" + "UpdateDate": "2018-04-09T17:42:48+00:00", + "VersionId": "v6" + }, + "AmazonRDSPreviewServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonRDSPreviewServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-05-31T18:02:00+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSecurityGroup", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:RevokeSecurityGroupIngress" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "sns:Publish" + ], + "Effect": 
"Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:CreateLogGroup" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/rds/*" + ] + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/rds/*:log-stream:*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIZHJJBU3675JOUEMQ", + "PolicyName": "AmazonRDSPreviewServiceRolePolicy", + "UpdateDate": "2018-05-31T18:02:00+00:00", + "VersionId": "v1" }, "AmazonRDSReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonRDSReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-28T21:36:32+00:00", + "CreateDate": "2015-02-06T18:40:53+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -8882,15 +20513,107 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJKTTTYV2IIHKLZ346", "PolicyName": "AmazonRDSReadOnlyAccess", "UpdateDate": "2017-08-28T21:36:32+00:00", "VersionId": "v3" }, + "AmazonRDSServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonRDSServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-01-08T18:17:46+00:00", + "DefaultVersionId": "v6", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSecurityGroup", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifyVpcEndpoint", + "ec2:RevokeSecurityGroupIngress", + "ec2:CreateVpcEndpoint", + "ec2:DescribeVpcEndpoints", + "ec2:DeleteVpcEndpoints", + "ec2:AssignPrivateIpAddresses", + "ec2:UnassignPrivateIpAddresses" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "sns:Publish" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:CreateLogGroup" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/rds/*", + "arn:aws:logs:*:*:log-group:/aws/docdb/*", + "arn:aws:logs:*:*:log-group:/aws/neptune/*" + ] + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/rds/*:log-stream:*", + "arn:aws:logs:*:*:log-group:/aws/docdb/*:log-stream:*", + "arn:aws:logs:*:*:log-group:/aws/neptune/*:log-stream:*" + ] + }, + { + "Action": [ + "kinesis:CreateStream", + "kinesis:PutRecord", + "kinesis:PutRecords", + "kinesis:DescribeStream", + "kinesis:SplitShard", + "kinesis:MergeShards", + "kinesis:DeleteStream", + "kinesis:UpdateShardCount" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:kinesis:*:*:stream/aws-rds-das-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIPEU5ZOBJWKWHUIBA", + "PolicyName": "AmazonRDSServiceRolePolicy", + "UpdateDate": "2019-04-16T20:12:27+00:00", + "VersionId": "v6" + }, "AmazonRedshiftFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonRedshiftFullAccess", "AttachmentCount": 0, - "CreateDate": 
"2017-09-19T18:27:44+00:00", + "CreateDate": "2015-02-06T18:40:50+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -8933,11 +20656,52 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAISEKCHH4YDB46B5ZO", "PolicyName": "AmazonRedshiftFullAccess", "UpdateDate": "2017-09-19T18:27:44+00:00", "VersionId": "v2" }, + "AmazonRedshiftQueryEditor": { + "Arn": "arn:aws:iam::aws:policy/AmazonRedshiftQueryEditor", + "AttachmentCount": 0, + "CreateDate": "2018-10-04T22:50:32+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "redshift:GetClusterCredentials", + "redshift:ListSchemas", + "redshift:ListTables", + "redshift:ListDatabases", + "redshift:ExecuteQuery", + "redshift:FetchResults", + "redshift:CancelQuery", + "redshift:DescribeClusters", + "redshift:DescribeQuery", + "redshift:DescribeTable", + "redshift:ViewQueriesFromConsole", + "redshift:DescribeSavedQueries", + "redshift:CreateSavedQuery", + "redshift:DeleteSavedQueries", + "redshift:ModifySavedQuery" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAINVFHHP7CWVHTGBGM", + "PolicyName": "AmazonRedshiftQueryEditor", + "UpdateDate": "2018-10-04T22:50:32+00:00", + "VersionId": "v1" + }, "AmazonRedshiftReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonRedshiftReadOnlyAccess", "AttachmentCount": 0, @@ -8971,6 +20735,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIGD46KSON64QBSEZM", "PolicyName": "AmazonRedshiftReadOnlyAccess", "UpdateDate": "2015-02-06T18:40:51+00:00", @@ -8980,7 +20745,7 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonRedshiftServiceLinkedRolePolicy", "AttachmentCount": 0, "CreateDate": "2017-09-18T19:19:45+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -8988,7 +20753,7 @@ aws_managed_policies_data = """ "ec2:DescribeVpcs", "ec2:DescribeSubnets", "ec2:DescribeNetworkInterfaces", - "ec2:DescribeAddress", + "ec2:DescribeAddresses", "ec2:AssociateAddress", "ec2:DisassociateAddress", "ec2:CreateNetworkInterface", @@ -9004,10 +20769,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJPY2VXNRUYOY3SRZS", "PolicyName": "AmazonRedshiftServiceLinkedRolePolicy", - "UpdateDate": "2017-09-18T19:19:45+00:00", - "VersionId": "v1" + "UpdateDate": "2017-09-25T21:20:15+00:00", + "VersionId": "v2" }, "AmazonRekognitionFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonRekognitionFullAccess", @@ -9029,6 +20795,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIWDAOK6AIFDVX6TT6", "PolicyName": "AmazonRekognitionFullAccess", "UpdateDate": "2016-11-30T14:40:44+00:00", @@ -9038,7 +20805,7 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AmazonRekognitionReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2016-11-30T14:58:06+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -9049,7 +20816,19 @@ aws_managed_policies_data = """ "rekognition:ListCollections", 
"rekognition:ListFaces", "rekognition:SearchFaces", - "rekognition:SearchFacesByImage" + "rekognition:SearchFacesByImage", + "rekognition:DetectText", + "rekognition:GetCelebrityInfo", + "rekognition:RecognizeCelebrities", + "rekognition:DetectModerationLabels", + "rekognition:GetLabelDetection", + "rekognition:GetFaceDetection", + "rekognition:GetContentModeration", + "rekognition:GetPersonTracking", + "rekognition:GetCelebrityRecognition", + "rekognition:GetFaceSearch", + "rekognition:DescribeStreamProcessor", + "rekognition:ListStreamProcessors" ], "Effect": "Allow", "Resource": "*" @@ -9060,9 +20839,158 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAILWSUHXUY4ES43SA4", "PolicyName": "AmazonRekognitionReadOnlyAccess", - "UpdateDate": "2016-11-30T14:58:06+00:00", + "UpdateDate": "2017-12-06T23:28:39+00:00", + "VersionId": "v2" + }, + "AmazonRekognitionServiceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonRekognitionServiceRole", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T16:52:13+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sns:Publish" + ], + "Effect": "Allow", + "Resource": "arn:aws:sns:*:*:AmazonRekognition*" + }, + { + "Action": [ + "kinesis:PutRecord", + "kinesis:PutRecords" + ], + "Effect": "Allow", + "Resource": "arn:aws:kinesis:*:*:stream/AmazonRekognition*" + }, + { + "Action": [ + "kinesisvideo:GetDataEndpoint", + "kinesisvideo:GetMedia" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJI6Q3CUQAVBJ2CTE2", + "PolicyName": "AmazonRekognitionServiceRole", + "UpdateDate": "2017-11-29T16:52:13+00:00", + "VersionId": "v1" + }, + "AmazonRoute53AutoNamingFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRoute53AutoNamingFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-01-18T18:40:41+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "route53:GetHostedZone", + "route53:ListHostedZonesByName", + "route53:CreateHostedZone", + "route53:DeleteHostedZone", + "route53:ChangeResourceRecordSets", + "route53:CreateHealthCheck", + "route53:GetHealthCheck", + "route53:DeleteHealthCheck", + "route53:UpdateHealthCheck", + "ec2:DescribeVpcs", + "ec2:DescribeRegions", + "servicediscovery:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJCNJBBLMJN2ZMV62Y", + "PolicyName": "AmazonRoute53AutoNamingFullAccess", + "UpdateDate": "2018-01-18T18:40:41+00:00", + "VersionId": "v1" + }, + "AmazonRoute53AutoNamingReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRoute53AutoNamingReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-01-18T03:02:59+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "servicediscovery:Get*", + "servicediscovery:List*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJBPMV2EFBFFKJ6SI4", + "PolicyName": "AmazonRoute53AutoNamingReadOnlyAccess", + "UpdateDate": "2018-01-18T03:02:59+00:00", + "VersionId": "v1" + 
}, + "AmazonRoute53AutoNamingRegistrantAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRoute53AutoNamingRegistrantAccess", + "AttachmentCount": 0, + "CreateDate": "2018-03-12T22:33:20+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "route53:GetHostedZone", + "route53:ListHostedZonesByName", + "route53:ChangeResourceRecordSets", + "route53:CreateHealthCheck", + "route53:GetHealthCheck", + "route53:DeleteHealthCheck", + "route53:UpdateHealthCheck", + "servicediscovery:Get*", + "servicediscovery:List*", + "servicediscovery:RegisterInstance", + "servicediscovery:DeregisterInstance" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJKXLG7EKP2O5SVZW6", + "PolicyName": "AmazonRoute53AutoNamingRegistrantAccess", + "UpdateDate": "2018-03-12T22:33:20+00:00", "VersionId": "v1" }, "AmazonRoute53DomainsFullAccess": { @@ -9088,6 +21016,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIPAFBMIYUILMOKL6G", "PolicyName": "AmazonRoute53DomainsFullAccess", "UpdateDate": "2015-02-06T18:40:56+00:00", @@ -9116,6 +21045,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIDRINP6PPTRXYVQCI", "PolicyName": "AmazonRoute53DomainsReadOnlyAccess", "UpdateDate": "2015-02-06T18:40:57+00:00", @@ -9124,8 +21054,8 @@ aws_managed_policies_data = """ "AmazonRoute53FullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonRoute53FullAccess", "AttachmentCount": 0, - "CreateDate": "2017-02-14T21:25:53+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2015-02-06T18:40:54+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -9137,8 +21067,9 @@ aws_managed_policies_data = """ "elasticbeanstalk:DescribeEnvironments", "s3:ListBucket", "s3:GetBucketLocation", - "s3:GetBucketWebsiteConfiguration", + "s3:GetBucketWebsite", "ec2:DescribeVpcs", + "ec2:DescribeVpcEndpoints", "ec2:DescribeRegions", "sns:ListTopics", "sns:ListSubscriptionsByTopic", @@ -9147,6 +21078,11 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": "apigateway:GET", + "Effect": "Allow", + "Resource": "arn:aws:apigateway:*::/domainnames" } ], "Version": "2012-10-17" @@ -9154,15 +21090,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJWVDLG5RPST6PHQ3A", "PolicyName": "AmazonRoute53FullAccess", - "UpdateDate": "2017-02-14T21:25:53+00:00", - "VersionId": "v2" + "UpdateDate": "2018-12-20T21:42:00+00:00", + "VersionId": "v4" }, "AmazonRoute53ReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonRoute53ReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2016-11-15T21:15:16+00:00", + "CreateDate": "2015-02-06T18:40:55+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -9183,14 +21120,84 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAITOYK2ZAOQFXV2JNC", "PolicyName": "AmazonRoute53ReadOnlyAccess", "UpdateDate": "2016-11-15T21:15:16+00:00", "VersionId": "v2" }, + "AmazonRoute53ResolverFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRoute53ResolverFullAccess", + "AttachmentCount": 0, 
+ "CreateDate": "2019-05-30T18:10:50+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "route53resolver:*", + "ec2:DescribeSubnets", + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfaces", + "ec2:CreateNetworkInterfacePermission", + "ec2:DescribeSecurityGroups", + "ec2:DescribeVpcs" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4MZN2MQCY3", + "PolicyName": "AmazonRoute53ResolverFullAccess", + "UpdateDate": "2019-05-30T18:10:50+00:00", + "VersionId": "v1" + }, + "AmazonRoute53ResolverReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonRoute53ResolverReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2019-05-30T18:11:31+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "route53resolver:Get*", + "route53resolver:List*", + "ec2:DescribeNetworkInterface", + "ec2:DescribeSecurityGroups", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4CARVKYCWY", + "PolicyName": "AmazonRoute53ResolverReadOnlyAccess", + "UpdateDate": "2019-05-30T18:11:31+00:00", + "VersionId": "v1" + }, "AmazonS3FullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonS3FullAccess", - "AttachmentCount": 1, + "AttachmentCount": 0, "CreateDate": "2015-02-06T18:40:58+00:00", "DefaultVersionId": "v1", "Document": { @@ -9206,6 +21213,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIFIR6V6BVTRAHWINE", "PolicyName": "AmazonS3FullAccess", "UpdateDate": "2015-02-06T18:40:58+00:00", @@ -9232,6 +21240,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIZTJ4DXE7G6AGAE6M", "PolicyName": "AmazonS3ReadOnlyAccess", "UpdateDate": "2015-02-06T18:40:59+00:00", @@ -9257,6 +21266,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ2P4NXCHAT7NDPNR4", "PolicyName": "AmazonSESFullAccess", "UpdateDate": "2015-02-06T18:41:02+00:00", @@ -9283,6 +21293,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAINV2XPFRMWJJNSCGI", "PolicyName": "AmazonSESReadOnlyAccess", "UpdateDate": "2015-02-06T18:41:03+00:00", @@ -9308,6 +21319,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJWEKLCXXUNT2SOLSG", "PolicyName": "AmazonSNSFullAccess", "UpdateDate": "2015-02-06T18:41:05+00:00", @@ -9334,6 +21346,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIZGQCQTFOFPMHSB6W", "PolicyName": "AmazonSNSReadOnlyAccess", "UpdateDate": "2015-02-06T18:41:06+00:00", @@ -9365,6 +21378,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + 
"PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJK5GQB7CIK7KHY2GA", "PolicyName": "AmazonSNSRole", "UpdateDate": "2015-02-06T18:41:30+00:00", @@ -9390,6 +21404,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI65L554VRJ33ECQS6", "PolicyName": "AmazonSQSFullAccess", "UpdateDate": "2015-02-06T18:41:07+00:00", @@ -9399,12 +21414,14 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AmazonSQSReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2015-02-06T18:41:08+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "sqs:ListDeadLetterSourceQueues", "sqs:ListQueues" ], "Effect": "Allow", @@ -9416,10 +21433,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIUGSSQY362XGCM6KW", "PolicyName": "AmazonSQSReadOnlyAccess", - "UpdateDate": "2015-02-06T18:41:08+00:00", - "VersionId": "v1" + "UpdateDate": "2018-08-20T23:35:49+00:00", + "VersionId": "v2" }, "AmazonSSMAutomationApproverAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonSSMAutomationApproverAccess", @@ -9445,6 +21463,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIDSSXIRWBSLWWIORC", "PolicyName": "AmazonSSMAutomationApproverAccess", "UpdateDate": "2017-08-07T23:07:28+00:00", @@ -9453,7 +21472,7 @@ aws_managed_policies_data = """ "AmazonSSMAutomationRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonSSMAutomationRole", "AttachmentCount": 0, - "CreateDate": "2017-07-24T23:29:12+00:00", + "CreateDate": "2016-12-05T22:09:55+00:00", "DefaultVersionId": "v5", "Document": { "Statement": [ @@ -9516,16 +21535,44 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJIBQCTBCXD2XRNB6W", "PolicyName": "AmazonSSMAutomationRole", "UpdateDate": "2017-07-24T23:29:12+00:00", "VersionId": "v5" }, + "AmazonSSMDirectoryServiceAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSSMDirectoryServiceAccess", + "AttachmentCount": 0, + "CreateDate": "2019-03-15T17:44:38+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ds:CreateComputer", + "ds:DescribeDirectories" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ7OJQH3CZU674ERII", + "PolicyName": "AmazonSSMDirectoryServiceAccess", + "UpdateDate": "2019-03-15T17:44:38+00:00", + "VersionId": "v1" + }, "AmazonSSMFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonSSMFullAccess", "AttachmentCount": 0, - "CreateDate": "2016-03-07T21:09:12+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2015-05-29T17:39:47+00:00", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -9540,6 +21587,24 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "ssm.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/ssm.amazonaws.com/AWSServiceRoleForAmazonSSM*" + }, + { + "Action": [ + "iam:DeleteServiceLinkedRole", + 
"iam:GetServiceLinkedRoleDeletionStatus" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/ssm.amazonaws.com/AWSServiceRoleForAmazonSSM*" } ], "Version": "2012-10-17" @@ -9547,15 +21612,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJA7V6HI4ISQFMDYAG", "PolicyName": "AmazonSSMFullAccess", - "UpdateDate": "2016-03-07T21:09:12+00:00", - "VersionId": "v2" + "UpdateDate": "2018-07-23T22:53:18+00:00", + "VersionId": "v3" }, "AmazonSSMMaintenanceWindowRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AmazonSSMMaintenanceWindowRole", "AttachmentCount": 0, - "CreateDate": "2017-08-09T20:49:14+00:00", + "CreateDate": "2016-12-01T15:57:54+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -9602,11 +21668,74 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJV3JNYSTZ47VOXYME", "PolicyName": "AmazonSSMMaintenanceWindowRole", "UpdateDate": "2017-08-09T20:49:14+00:00", "VersionId": "v2" }, + "AmazonSSMManagedInstanceCore": { + "Arn": "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore", + "AttachmentCount": 0, + "CreateDate": "2019-03-15T17:22:12+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ssm:DescribeAssociation", + "ssm:GetDeployablePatchSnapshotForInstance", + "ssm:GetDocument", + "ssm:DescribeDocument", + "ssm:GetManifest", + "ssm:GetParameter", + "ssm:GetParameters", + "ssm:ListAssociations", + "ssm:ListInstanceAssociations", + "ssm:PutInventory", + "ssm:PutComplianceItems", + "ssm:PutConfigurePackageResult", + "ssm:UpdateAssociationStatus", + "ssm:UpdateInstanceAssociationStatus", + "ssm:UpdateInstanceInformation" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ssmmessages:CreateControlChannel", + "ssmmessages:CreateDataChannel", + "ssmmessages:OpenControlChannel", + "ssmmessages:OpenDataChannel" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2messages:AcknowledgeMessage", + "ec2messages:DeleteMessage", + "ec2messages:FailMessage", + "ec2messages:GetEndpoint", + "ec2messages:GetMessages", + "ec2messages:SendReply" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIXSHM2BNB2D3AXXRU", + "PolicyName": "AmazonSSMManagedInstanceCore", + "UpdateDate": "2019-05-23T16:54:21+00:00", + "VersionId": "v2" + }, "AmazonSSMReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess", "AttachmentCount": 0, @@ -9629,16 +21758,589 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJODSKQGGJTHRYZ5FC", "PolicyName": "AmazonSSMReadOnlyAccess", "UpdateDate": "2015-05-29T17:44:19+00:00", "VersionId": "v1" }, + "AmazonSSMServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonSSMServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-11-13T19:20:08+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "ssm:CancelCommand", + "ssm:GetCommandInvocation", + "ssm:ListCommandInvocations", + "ssm:ListCommands", + "ssm:SendCommand", + "ssm:GetAutomationExecution", + "ssm:GetParameters", + "ssm:StartAutomationExecution" + ], + "Effect": 
"Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "lambda:InvokeFunction" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:lambda:*:*:function:SSM*", + "arn:aws:lambda:*:*:function:*:SSM*" + ] + }, + { + "Action": [ + "states:DescribeExecution", + "states:StartExecution" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:states:*:*:stateMachine:SSM*", + "arn:aws:states:*:*:execution:SSM*" + ] + }, + { + "Action": [ + "resource-groups:ListGroups", + "resource-groups:ListGroupResources" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "tag:GetResources" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringEquals": { + "iam:PassedToService": [ + "ssm.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIXJ26NUGBA3TCV7EC", + "PolicyName": "AmazonSSMServiceRolePolicy", + "UpdateDate": "2018-07-25T22:14:20+00:00", + "VersionId": "v3" + }, + "AmazonSageMakerFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSageMakerFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T13:07:59+00:00", + "DefaultVersionId": "v11", + "Document": { + "Statement": [ + { + "Action": [ + "sagemaker:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "application-autoscaling:DeleteScalingPolicy", + "application-autoscaling:DeleteScheduledAction", + "application-autoscaling:DeregisterScalableTarget", + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:DescribeScalingActivities", + "application-autoscaling:DescribeScalingPolicies", + "application-autoscaling:DescribeScheduledActions", + "application-autoscaling:PutScalingPolicy", + "application-autoscaling:PutScheduledAction", + "application-autoscaling:RegisterScalableTarget", + "aws-marketplace:ViewSubscriptions", + "cloudwatch:DeleteAlarms", + "cloudwatch:DescribeAlarms", + "cloudwatch:GetMetricData", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "cloudwatch:PutMetricAlarm", + "cloudwatch:PutMetricData", + "codecommit:BatchGetRepositories", + "codecommit:CreateRepository", + "codecommit:GetRepository", + "codecommit:ListBranches", + "codecommit:ListRepositories", + "cognito-idp:AdminAddUserToGroup", + "cognito-idp:AdminCreateUser", + "cognito-idp:AdminDeleteUser", + "cognito-idp:AdminDisableUser", + "cognito-idp:AdminEnableUser", + "cognito-idp:AdminRemoveUserFromGroup", + "cognito-idp:CreateGroup", + "cognito-idp:CreateUserPool", + "cognito-idp:CreateUserPoolClient", + "cognito-idp:CreateUserPoolDomain", + "cognito-idp:DescribeUserPool", + "cognito-idp:DescribeUserPoolClient", + "cognito-idp:ListGroups", + "cognito-idp:ListIdentityProviders", + "cognito-idp:ListUserPoolClients", + "cognito-idp:ListUserPools", + "cognito-idp:ListUsers", + "cognito-idp:ListUsersInGroup", + "cognito-idp:UpdateUserPool", + "cognito-idp:UpdateUserPoolClient", + "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:CreateVpcEndpoint", + "ec2:DeleteNetworkInterface", + "ec2:DeleteNetworkInterfacePermission", + "ec2:DescribeDhcpOptions", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeRouteTables", + 
"ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcs", + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:CreateRepository", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + "ecr:Describe*", + "elastic-inference:Connect", + "glue:CreateJob", + "glue:DeleteJob", + "glue:GetJob", + "glue:GetJobRun", + "glue:GetJobRuns", + "glue:GetJobs", + "glue:ResetJobBookmark", + "glue:StartJobRun", + "glue:UpdateJob", + "groundtruthlabeling:*", + "iam:ListRoles", + "kms:DescribeKey", + "kms:ListAliases", + "lambda:ListFunctions", + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogStreams", + "logs:GetLogEvents", + "logs:PutLogEvents", + "sns:ListTopics" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ecr:SetRepositoryPolicy", + "ecr:CompleteLayerUpload", + "ecr:BatchDeleteImage", + "ecr:UploadLayerPart", + "ecr:DeleteRepositoryPolicy", + "ecr:InitiateLayerUpload", + "ecr:DeleteRepository", + "ecr:PutImage" + ], + "Effect": "Allow", + "Resource": "arn:aws:ecr:*:*:repository/*sagemaker*" + }, + { + "Action": [ + "codecommit:GitPull", + "codecommit:GitPush" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:codecommit:*:*:*sagemaker*", + "arn:aws:codecommit:*:*:*SageMaker*", + "arn:aws:codecommit:*:*:*Sagemaker*" + ] + }, + { + "Action": [ + "secretsmanager:ListSecrets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "secretsmanager:DescribeSecret", + "secretsmanager:GetSecretValue", + "secretsmanager:CreateSecret" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:secretsmanager:*:*:secret:AmazonSageMaker-*" + ] + }, + { + "Action": [ + "secretsmanager:DescribeSecret", + "secretsmanager:GetSecretValue" + ], + "Condition": { + "StringEquals": { + "secretsmanager:ResourceTag/SageMaker": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "robomaker:CreateSimulationApplication", + "robomaker:DescribeSimulationApplication", + "robomaker:DeleteSimulationApplication" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "robomaker:CreateSimulationJob", + "robomaker:DescribeSimulationJob", + "robomaker:CancelSimulationJob" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*SageMaker*", + "arn:aws:s3:::*Sagemaker*", + "arn:aws:s3:::*sagemaker*", + "arn:aws:s3:::*aws-glue*" + ] + }, + { + "Action": [ + "s3:CreateBucket", + "s3:GetBucketLocation", + "s3:ListBucket", + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:GetObject" + ], + "Condition": { + "StringEqualsIgnoreCase": { + "s3:ExistingObjectTag/SageMaker": "true" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "lambda:InvokeFunction" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:lambda:*:*:function:*SageMaker*", + "arn:aws:lambda:*:*:function:*sagemaker*", + "arn:aws:lambda:*:*:function:*Sagemaker*", + "arn:aws:lambda:*:*:function:*LabelingFunction*" + ] + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "sagemaker.application-autoscaling.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/sagemaker.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_SageMakerEndpoint" + }, + { + "Action": "iam:CreateServiceLinkedRole", 
+ "Condition": { + "StringEquals": { + "iam:AWSServiceName": "robomaker.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "sns:Subscribe", + "sns:CreateTopic" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:sns:*:*:*SageMaker*", + "arn:aws:sns:*:*:*Sagemaker*", + "arn:aws:sns:*:*:*sagemaker*" + ] + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": [ + "sagemaker.amazonaws.com", + "glue.amazonaws.com", + "robomaker.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJZ5IWYMXO5QDB4QOG", + "PolicyName": "AmazonSageMakerFullAccess", + "UpdateDate": "2019-05-09T04:44:05+00:00", + "VersionId": "v11" + }, + "AmazonSageMakerReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AmazonSageMakerReadOnly", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T13:07:09+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "application-autoscaling:DescribeScalableTargets", + "application-autoscaling:DescribeScalingActivities", + "application-autoscaling:DescribeScalingPolicies", + "application-autoscaling:DescribeScheduledActions", + "aws-marketplace:ViewSubscriptions", + "aws-marketplace:ViewSubscriptions", + "cloudwatch:DescribeAlarms", + "cognito-idp:DescribeUserPool", + "cognito-idp:DescribeUserPoolClient", + "cognito-idp:ListGroups", + "cognito-idp:ListIdentityProviders", + "cognito-idp:ListUserPoolClients", + "cognito-idp:ListUserPools", + "cognito-idp:ListUsers", + "cognito-idp:ListUsersInGroup", + "ecr:Describe*", + "sagemaker:Describe*", + "sagemaker:GetSearchSuggestions", + "sagemaker:List*", + "sagemaker:Search" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJTZ2FTFCQ6CFLQA2O", + "PolicyName": "AmazonSageMakerReadOnly", + "UpdateDate": "2019-01-04T22:22:07+00:00", + "VersionId": "v5" + }, + "AmazonSumerianFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonSumerianFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-04-24T20:14:16+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sumerian:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJMGUENPB56MXVVGBE", + "PolicyName": "AmazonSumerianFullAccess", + "UpdateDate": "2018-04-24T20:14:16+00:00", + "VersionId": "v1" + }, + "AmazonTextractFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonTextractFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T19:07:42+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "textract:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIQDD47A7H3GBVPWOQ", + "PolicyName": "AmazonTextractFullAccess", + "UpdateDate": "2018-11-28T19:07:42+00:00", + "VersionId": "v1" + }, + "AmazonTextractServiceRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/AmazonTextractServiceRole", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T19:12:16+00:00", + 
"DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "sns:Publish" + ], + "Effect": "Allow", + "Resource": "arn:aws:sns:*:*:AmazonTextract*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJBDSAWESWLL34WASG", + "PolicyName": "AmazonTextractServiceRole", + "UpdateDate": "2018-11-28T19:12:16+00:00", + "VersionId": "v1" + }, + "AmazonTranscribeFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonTranscribeFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-04-04T16:06:16+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "transcribe:*" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*transcribe*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAINAV45F5NT5RMFO7K", + "PolicyName": "AmazonTranscribeFullAccess", + "UpdateDate": "2018-04-04T16:06:16+00:00", + "VersionId": "v1" + }, + "AmazonTranscribeReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonTranscribeReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-04-04T16:05:06+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "transcribe:Get*", + "transcribe:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJM6JONISXCAZKFCAO", + "PolicyName": "AmazonTranscribeReadOnlyAccess", + "UpdateDate": "2018-04-04T16:05:06+00:00", + "VersionId": "v1" + }, "AmazonVPCCrossAccountNetworkInterfaceOperations": { "Arn": "arn:aws:iam::aws:policy/AmazonVPCCrossAccountNetworkInterfaceOperations", "AttachmentCount": 0, "CreateDate": "2017-07-18T20:47:16+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -9659,8 +22361,11 @@ aws_managed_policies_data = """ "ec2:CreateNetworkInterface", "ec2:DeleteNetworkInterface", "ec2:CreateNetworkInterfacePermission", + "ec2:DeleteNetworkInterfacePermission", + "ec2:DescribeNetworkInterfacePermissions", "ec2:ModifyNetworkInterfaceAttribute", "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeAvailabilityZones", "ec2:DescribeVpcs", "ec2:DescribeSubnets" ], @@ -9685,26 +22390,31 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ53Y4ZY5OHP4CNRJC", "PolicyName": "AmazonVPCCrossAccountNetworkInterfaceOperations", - "UpdateDate": "2017-07-18T20:47:16+00:00", - "VersionId": "v1" + "UpdateDate": "2019-01-07T19:16:23+00:00", + "VersionId": "v3" }, "AmazonVPCFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonVPCFullAccess", - "AttachmentCount": 1, - "CreateDate": "2015-12-17T17:25:44+00:00", - "DefaultVersionId": "v5", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:16+00:00", + "DefaultVersionId": "v7", "Document": { "Statement": [ { "Action": [ "ec2:AcceptVpcPeeringConnection", + "ec2:AcceptVpcEndpointConnections", "ec2:AllocateAddress", + "ec2:AssignIpv6Addresses", "ec2:AssignPrivateIpAddresses", "ec2:AssociateAddress", "ec2:AssociateDhcpOptions", "ec2:AssociateRouteTable", + "ec2:AssociateSubnetCidrBlock", + "ec2:AssociateVpcCidrBlock", 
"ec2:AttachClassicLinkVpc", "ec2:AttachInternetGateway", "ec2:AttachNetworkInterface", @@ -9712,7 +22422,10 @@ aws_managed_policies_data = """ "ec2:AuthorizeSecurityGroupEgress", "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateCustomerGateway", + "ec2:CreateDefaultSubnet", + "ec2:CreateDefaultVpc", "ec2:CreateDhcpOptions", + "ec2:CreateEgressOnlyInternetGateway", "ec2:CreateFlowLogs", "ec2:CreateInternetGateway", "ec2:CreateNatGateway", @@ -9720,6 +22433,7 @@ aws_managed_policies_data = """ "ec2:CreateNetworkAcl", "ec2:CreateNetworkAclEntry", "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", "ec2:CreateRoute", "ec2:CreateRouteTable", "ec2:CreateSecurityGroup", @@ -9727,18 +22441,22 @@ aws_managed_policies_data = """ "ec2:CreateTags", "ec2:CreateVpc", "ec2:CreateVpcEndpoint", + "ec2:CreateVpcEndpointConnectionNotification", + "ec2:CreateVpcEndpointServiceConfiguration", "ec2:CreateVpcPeeringConnection", "ec2:CreateVpnConnection", "ec2:CreateVpnConnectionRoute", "ec2:CreateVpnGateway", "ec2:DeleteCustomerGateway", "ec2:DeleteDhcpOptions", + "ec2:DeleteEgressOnlyInternetGateway", "ec2:DeleteFlowLogs", "ec2:DeleteInternetGateway", "ec2:DeleteNatGateway", "ec2:DeleteNetworkAcl", "ec2:DeleteNetworkAclEntry", "ec2:DeleteNetworkInterface", + "ec2:DeleteNetworkInterfacePermission", "ec2:DeleteRoute", "ec2:DeleteRouteTable", "ec2:DeleteSecurityGroup", @@ -9746,15 +22464,19 @@ aws_managed_policies_data = """ "ec2:DeleteTags", "ec2:DeleteVpc", "ec2:DeleteVpcEndpoints", + "ec2:DeleteVpcEndpointConnectionNotifications", + "ec2:DeleteVpcEndpointServiceConfigurations", "ec2:DeleteVpcPeeringConnection", "ec2:DeleteVpnConnection", "ec2:DeleteVpnConnectionRoute", "ec2:DeleteVpnGateway", + "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", "ec2:DescribeClassicLinkInstances", "ec2:DescribeCustomerGateways", "ec2:DescribeDhcpOptions", + "ec2:DescribeEgressOnlyInternetGateways", "ec2:DescribeFlowLogs", "ec2:DescribeInstances", "ec2:DescribeInternetGateways", @@ -9763,15 +22485,23 @@ aws_managed_policies_data = """ "ec2:DescribeNatGateways", "ec2:DescribeNetworkAcls", "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfacePermissions", "ec2:DescribeNetworkInterfaces", "ec2:DescribePrefixLists", "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroupReferences", "ec2:DescribeSecurityGroups", + "ec2:DescribeStaleSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcAttribute", "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcClassicLinkDnsSupport", + "ec2:DescribeVpcEndpointConnectionNotifications", + "ec2:DescribeVpcEndpointConnections", "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcEndpointServiceConfigurations", + "ec2:DescribeVpcEndpointServicePermissions", "ec2:DescribeVpcEndpointServices", "ec2:DescribeVpcPeeringConnections", "ec2:DescribeVpcs", @@ -9783,15 +22513,25 @@ aws_managed_policies_data = """ "ec2:DetachVpnGateway", "ec2:DisableVgwRoutePropagation", "ec2:DisableVpcClassicLink", + "ec2:DisableVpcClassicLinkDnsSupport", "ec2:DisassociateAddress", "ec2:DisassociateRouteTable", + "ec2:DisassociateSubnetCidrBlock", + "ec2:DisassociateVpcCidrBlock", "ec2:EnableVgwRoutePropagation", "ec2:EnableVpcClassicLink", + "ec2:EnableVpcClassicLinkDnsSupport", "ec2:ModifyNetworkInterfaceAttribute", "ec2:ModifySubnetAttribute", "ec2:ModifyVpcAttribute", "ec2:ModifyVpcEndpoint", + "ec2:ModifyVpcEndpointConnectionNotification", + "ec2:ModifyVpcEndpointServiceConfiguration", + "ec2:ModifyVpcEndpointServicePermissions", + 
"ec2:ModifyVpcPeeringConnectionOptions", + "ec2:ModifyVpcTenancy", "ec2:MoveAddressToVpc", + "ec2:RejectVpcEndpointConnections", "ec2:RejectVpcPeeringConnection", "ec2:ReleaseAddress", "ec2:ReplaceNetworkAclAssociation", @@ -9802,7 +22542,10 @@ aws_managed_policies_data = """ "ec2:RestoreAddressToClassic", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", - "ec2:UnassignPrivateIpAddresses" + "ec2:UnassignIpv6Addresses", + "ec2:UnassignPrivateIpAddresses", + "ec2:UpdateSecurityGroupRuleDescriptionsEgress", + "ec2:UpdateSecurityGroupRuleDescriptionsIngress" ], "Effect": "Allow", "Resource": "*" @@ -9813,39 +22556,50 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJBWPGNOVKZD3JI2P2", "PolicyName": "AmazonVPCFullAccess", - "UpdateDate": "2015-12-17T17:25:44+00:00", - "VersionId": "v5" + "UpdateDate": "2018-03-15T18:30:25+00:00", + "VersionId": "v7" }, "AmazonVPCReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonVPCReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2015-12-17T17:25:56+00:00", - "DefaultVersionId": "v4", + "CreateDate": "2015-02-06T18:41:17+00:00", + "DefaultVersionId": "v6", "Document": { "Statement": [ { "Action": [ + "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DescribeClassicLinkInstances", "ec2:DescribeCustomerGateways", "ec2:DescribeDhcpOptions", + "ec2:DescribeEgressOnlyInternetGateways", "ec2:DescribeFlowLogs", "ec2:DescribeInternetGateways", "ec2:DescribeMovingAddresses", "ec2:DescribeNatGateways", "ec2:DescribeNetworkAcls", "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfacePermissions", "ec2:DescribeNetworkInterfaces", "ec2:DescribePrefixLists", "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroupReferences", "ec2:DescribeSecurityGroups", + "ec2:DescribeStaleSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcAttribute", "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcClassicLinkDnsSupport", "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcEndpointConnectionNotifications", + "ec2:DescribeVpcEndpointConnections", + "ec2:DescribeVpcEndpointServiceConfigurations", + "ec2:DescribeVpcEndpointServicePermissions", "ec2:DescribeVpcEndpointServices", "ec2:DescribeVpcPeeringConnections", "ec2:DescribeVpcs", @@ -9861,16 +22615,136 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIICZJNOJN36GTG6CM", "PolicyName": "AmazonVPCReadOnlyAccess", - "UpdateDate": "2015-12-17T17:25:56+00:00", - "VersionId": "v4" + "UpdateDate": "2018-03-07T18:34:42+00:00", + "VersionId": "v6" + }, + "AmazonWorkLinkFullAccess": { + "Arn": "arn:aws:iam::aws:policy/AmazonWorkLinkFullAccess", + "AttachmentCount": 0, + "CreateDate": "2019-01-23T18:52:09+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "worklink:*" + ], + "Effect": "Allow", + "Resource": "arn:aws:worklink:*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJM4ITL7TEVURHCQSY", + "PolicyName": "AmazonWorkLinkFullAccess", + "UpdateDate": "2019-01-23T18:52:09+00:00", + "VersionId": "v1" + }, + "AmazonWorkLinkReadOnly": { + "Arn": "arn:aws:iam::aws:policy/AmazonWorkLinkReadOnly", + "AttachmentCount": 0, + "CreateDate": "2019-01-23T19:07:10+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ 
+ { + "Action": [ + "worklink:Describe*", + "worklink:List*" + ], + "Effect": "Allow", + "Resource": "arn:aws:worklink:*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIANQMFGU4EUUZKFQ4", + "PolicyName": "AmazonWorkLinkReadOnly", + "UpdateDate": "2019-01-23T19:07:10+00:00", + "VersionId": "v1" + }, + "AmazonWorkLinkServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonWorkLinkServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2019-03-18T18:00:16+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterfacePermission", + "ec2:CreateNetworkInterfacePermission", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DeleteNetworkInterface" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "kinesis:PutRecord", + "kinesis:PutRecords" + ], + "Effect": "Allow", + "Resource": "arn:aws:kinesis:*:*:stream/AmazonWorkLink-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAINJJP6CO7ATFCV4CU", + "PolicyName": "AmazonWorkLinkServiceRolePolicy", + "UpdateDate": "2019-03-18T18:00:16+00:00", + "VersionId": "v1" + }, + "AmazonWorkMailEventsServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AmazonWorkMailEventsServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2019-04-16T16:52:43+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAZKAPJZG4JG5LNO3U7", + "PolicyName": "AmazonWorkMailEventsServiceRolePolicy", + "UpdateDate": "2019-04-16T16:52:43+00:00", + "VersionId": "v1" }, "AmazonWorkMailFullAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonWorkMailFullAccess", "AttachmentCount": 0, - "CreateDate": "2017-04-20T08:35:49+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2015-02-06T18:40:41+00:00", + "DefaultVersionId": "v6", "Document": { "Statement": [ { @@ -9907,11 +22781,49 @@ aws_managed_policies_data = """ "ec2:RevokeSecurityGroupIngress", "kms:DescribeKey", "kms:ListAliases", + "lambda:ListFunctions", + "route53:ChangeResourceRecordSets", + "route53:ListHostedZones", + "route53:ListResourceRecordSets", + "route53domains:CheckDomainAvailability", + "route53domains:ListDomains", "ses:*", - "workmail:*" + "workmail:*", + "iam:ListRoles", + "logs:DescribeLogGroups", + "logs:CreateLogGroup", + "logs:PutRetentionPolicy" ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "events.workmail.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/events.workmail.amazonaws.com/AWSServiceRoleForAmazonWorkMailEvents*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:PassedToService": "events.workmail.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": 
"arn:aws:iam::*:role/*workmail*" } ], "Version": "2012-10-17" @@ -9919,16 +22831,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJQVKNMT7SVATQ4AUY", "PolicyName": "AmazonWorkMailFullAccess", - "UpdateDate": "2017-04-20T08:35:49+00:00", - "VersionId": "v3" + "UpdateDate": "2019-05-13T15:21:29+00:00", + "VersionId": "v6" }, "AmazonWorkMailReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AmazonWorkMailReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2015-02-06T18:40:42+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -9938,7 +22851,10 @@ aws_managed_policies_data = """ "workmail:Describe*", "workmail:Get*", "workmail:List*", - "workmail:Search*" + "workmail:Search*", + "lambda:ListFunctions", + "iam:ListRoles", + "logs:DescribeLogGroups" ], "Effect": "Allow", "Resource": "*" @@ -9949,15 +22865,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJHF7J65E2QFKCWAJM", "PolicyName": "AmazonWorkMailReadOnlyAccess", - "UpdateDate": "2015-02-06T18:40:42+00:00", - "VersionId": "v1" + "UpdateDate": "2019-05-13T15:12:46+00:00", + "VersionId": "v3" }, "AmazonWorkSpacesAdmin": { "Arn": "arn:aws:iam::aws:policy/AmazonWorkSpacesAdmin", "AttachmentCount": 0, - "CreateDate": "2016-08-18T23:08:42+00:00", + "CreateDate": "2015-09-22T22:21:15+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -9990,6 +22907,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ26AU6ATUQCT5KVJU", "PolicyName": "AmazonWorkSpacesAdmin", "UpdateDate": "2016-08-18T23:08:42+00:00", @@ -10013,6 +22931,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJPRL4KYETIH7XGTSS", "PolicyName": "AmazonWorkSpacesApplicationManagerAdminAccess", "UpdateDate": "2015-04-09T14:03:18+00:00", @@ -10054,6 +22973,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJLCDXYRINDMUXEVL6", "PolicyName": "AmazonZocaloFullAccess", "UpdateDate": "2015-02-06T18:41:13+00:00", @@ -10082,6 +23002,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAISRCSSJNS3QPKZJPM", "PolicyName": "AmazonZocaloReadOnlyAccess", "UpdateDate": "2015-02-06T18:41:14+00:00", @@ -10119,16 +23040,108 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIEL3HJCCWFVHA6KPG", "PolicyName": "ApplicationAutoScalingForAmazonAppStreamAccess", "UpdateDate": "2017-02-06T21:39:56+00:00", "VersionId": "v1" }, + "ApplicationDiscoveryServiceContinuousExportServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/ApplicationDiscoveryServiceContinuousExportServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-08-09T20:22:01+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "glue:CreateDatabase", + "glue:UpdateDatabase", + "glue:CreateTable", + "glue:UpdateTable", + "firehose:CreateDeliveryStream", + "firehose:DescribeDeliveryStream", + "logs:CreateLogGroup" + ], + 
"Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "firehose:DeleteDeliveryStream", + "firehose:PutRecord", + "firehose:PutRecordBatch", + "firehose:UpdateDestination" + ], + "Effect": "Allow", + "Resource": "arn:aws:firehose:*:*:deliverystream/aws-application-discovery-service*" + }, + { + "Action": [ + "s3:CreateBucket", + "s3:ListBucket", + "s3:PutBucketLogging", + "s3:PutEncryptionConfiguration" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::aws-application-discovery-service*" + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::aws-application-discovery-service*/*" + }, + { + "Action": [ + "logs:CreateLogStream", + "logs:PutRetentionPolicy" + ], + "Effect": "Allow", + "Resource": "arn:aws:logs:*:*:log-group:/aws/application-discovery-service/firehose*" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": "firehose.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/AWSApplicationDiscoveryServiceFirehose" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": "firehose.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/service-role/AWSApplicationDiscoveryServiceFirehose" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJMGMY3P6OEWOELRFE", + "PolicyName": "ApplicationDiscoveryServiceContinuousExportServiceRolePolicy", + "UpdateDate": "2018-08-13T22:31:21+00:00", + "VersionId": "v2" + }, "AutoScalingConsoleFullAccess": { "Arn": "arn:aws:iam::aws:policy/AutoScalingConsoleFullAccess", "AttachmentCount": 0, "CreateDate": "2017-01-12T19:43:16+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -10136,10 +23149,16 @@ aws_managed_policies_data = """ "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateKeyPair", "ec2:CreateSecurityGroup", + "ec2:DescribeAccountAttributes", "ec2:DescribeAvailabilityZones", "ec2:DescribeImages", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstances", "ec2:DescribeKeyPairs", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribePlacementGroups", "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", "ec2:DescribeSubnets", "ec2:DescribeVpcs", "ec2:DescribeVpcClassicLink", @@ -10175,6 +23194,21 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": "iam:ListRoles", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "autoscaling.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -10182,10 +23216,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIYEN6FJGYYWJFFCZW", "PolicyName": "AutoScalingConsoleFullAccess", - "UpdateDate": "2017-01-12T19:43:16+00:00", - "VersionId": "v1" + "UpdateDate": "2018-02-06T23:15:36+00:00", + "VersionId": "v2" }, "AutoScalingConsoleReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/AutoScalingConsoleReadOnlyAccess", @@ -10237,6 +23272,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI3A7GDXOYQV3VUQMK", "PolicyName": "AutoScalingConsoleReadOnlyAccess", 
"UpdateDate": "2017-01-12T19:48:53+00:00", @@ -10246,7 +23282,7 @@ aws_managed_policies_data = """ "Arn": "arn:aws:iam::aws:policy/AutoScalingFullAccess", "AttachmentCount": 0, "CreateDate": "2017-01-12T19:31:58+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -10258,6 +23294,42 @@ aws_managed_policies_data = """ "Action": "cloudwatch:PutMetricAlarm", "Effect": "Allow", "Resource": "*" + }, + { + "Action": [ + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeImages", + "ec2:DescribeInstanceAttribute", + "ec2:DescribeInstances", + "ec2:DescribeKeyPairs", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribePlacementGroups", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSubnets", + "ec2:DescribeVpcClassicLink" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "autoscaling.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -10265,10 +23337,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIAWRCSJDDXDXGPCFU", "PolicyName": "AutoScalingFullAccess", - "UpdateDate": "2017-01-12T19:31:58+00:00", - "VersionId": "v1" + "UpdateDate": "2018-02-06T21:59:13+00:00", + "VersionId": "v2" }, "AutoScalingNotificationAccessRole": { "Arn": "arn:aws:iam::aws:policy/service-role/AutoScalingNotificationAccessRole", @@ -10292,6 +23365,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIO2VMUPGDC5PZVXVA", "PolicyName": "AutoScalingNotificationAccessRole", "UpdateDate": "2015-02-06T18:41:22+00:00", @@ -10315,25 +23389,121 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIAFWUVLC2LPLSFTFG", "PolicyName": "AutoScalingReadOnlyAccess", "UpdateDate": "2017-01-12T19:39:35+00:00", "VersionId": "v1" }, + "AutoScalingServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/AutoScalingServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-01-08T23:10:55+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AttachClassicLinkVpc", + "ec2:CancelSpotInstanceRequests", + "ec2:CreateFleet", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:Describe*", + "ec2:DetachClassicLinkVpc", + "ec2:ModifyInstanceAttribute", + "ec2:RequestSpotInstances", + "ec2:RunInstances", + "ec2:TerminateInstances" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "EC2InstanceManagement" + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringLike": { + "iam:PassedToService": "ec2.amazonaws.com*" + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "EC2InstanceProfileManagement" + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "EC2SpotManagement" + }, + { + "Action": [ + "elasticloadbalancing:Register*", + "elasticloadbalancing:Deregister*", + "elasticloadbalancing:Describe*" + 
], + "Effect": "Allow", + "Resource": "*", + "Sid": "ELBManagement" + }, + { + "Action": [ + "cloudwatch:DeleteAlarms", + "cloudwatch:DescribeAlarms", + "cloudwatch:PutMetricAlarm" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "CWManagement" + }, + { + "Action": [ + "sns:Publish" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "SNSManagement" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIC5D2V7MRWBMHGD7G", + "PolicyName": "AutoScalingServiceRolePolicy", + "UpdateDate": "2018-10-31T18:19:10+00:00", + "VersionId": "v2" + }, "Billing": { "Arn": "arn:aws:iam::aws:policy/job-function/Billing", "AttachmentCount": 0, "CreateDate": "2016-11-10T17:33:18+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ "aws-portal:*Billing", + "awsbillingconsole:*Billing", "aws-portal:*Usage", + "awsbillingconsole:*Usage", "aws-portal:*PaymentMethods", + "awsbillingconsole:*PaymentMethods", "budgets:ViewBudget", - "budgets:ModifyBudget" + "budgets:ModifyBudget", + "cur:*" ], "Effect": "Allow", "Resource": "*" @@ -10344,15 +23514,61 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/job-function/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIFTHXT6FFMIRT7ZEA", "PolicyName": "Billing", - "UpdateDate": "2016-11-10T17:33:18+00:00", - "VersionId": "v1" + "UpdateDate": "2018-02-06T23:46:37+00:00", + "VersionId": "v2" + }, + "ClientVPNServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/ClientVPNServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-12-10T21:20:25+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + "ec2:DescribeSecurityGroups", + "ec2:DescribeVpcs", + "ec2:DescribeSubnets", + "ec2:DescribeInternetGateways", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DeleteNetworkInterface", + "ec2:DescribeAccountAttributes", + "ds:AuthorizeApplication", + "ds:DescribeDirectories", + "ds:GetDirectoryLimits", + "ds:ListAuthorizedApplications", + "ds:UnauthorizeApplication", + "logs:DescribeLogStreams", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogGroups", + "acm:GetCertificate", + "acm:DescribeCertificate" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI2SV25KUCYQYS5N74", + "PolicyName": "ClientVPNServiceRolePolicy", + "UpdateDate": "2019-01-16T22:22:28+00:00", + "VersionId": "v2" }, "CloudFrontFullAccess": { "Arn": "arn:aws:iam::aws:policy/CloudFrontFullAccess", "AttachmentCount": 0, - "CreateDate": "2016-01-21T17:03:57+00:00", + "CreateDate": "2015-02-06T18:39:50+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -10380,6 +23596,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIPRV52SH6HDCCFY6U", "PolicyName": "CloudFrontFullAccess", "UpdateDate": "2016-01-21T17:03:57+00:00", @@ -10388,7 +23605,7 @@ aws_managed_policies_data = """ "CloudFrontReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/CloudFrontReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2016-01-21T17:03:28+00:00", + "CreateDate": 
"2015-02-06T18:39:55+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -10411,11 +23628,43 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJJZMNYOTZCNQP36LG", "PolicyName": "CloudFrontReadOnlyAccess", "UpdateDate": "2016-01-21T17:03:28+00:00", "VersionId": "v3" }, + "CloudHSMServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/CloudHSMServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-11-06T19:12:46+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJILYY7JP6JLMQG56I", + "PolicyName": "CloudHSMServiceRolePolicy", + "UpdateDate": "2017-11-06T19:12:46+00:00", + "VersionId": "v1" + }, "CloudSearchFullAccess": { "Arn": "arn:aws:iam::aws:policy/CloudSearchFullAccess", "AttachmentCount": 0, @@ -10436,6 +23685,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIM6OOWKQ7L7VBOZOC", "PolicyName": "CloudSearchFullAccess", "UpdateDate": "2015-02-06T18:39:56+00:00", @@ -10462,11 +23712,52 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJWPLX7N7BCC3RZLHW", "PolicyName": "CloudSearchReadOnlyAccess", "UpdateDate": "2015-02-06T18:39:57+00:00", "VersionId": "v1" }, + "CloudTrailServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/CloudTrailServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-10-24T21:21:44+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudtrail:*" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "CloudTrailFullAccess" + }, + { + "Action": [ + "organizations:DescribeAccount", + "organizations:DescribeOrganization", + "organizations:ListAccounts", + "organizations:ListAWSServiceAccessForOrganization" + ], + "Effect": "Allow", + "Resource": [ + "*" + ], + "Sid": "AwsOrgsAccess" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJXQJ45EGU6U7NQBW4", + "PolicyName": "CloudTrailServiceRolePolicy", + "UpdateDate": "2018-10-24T21:21:44+00:00", + "VersionId": "v1" + }, "CloudWatchActionsEC2Access": { "Arn": "arn:aws:iam::aws:policy/CloudWatchActionsEC2Access", "AttachmentCount": 0, @@ -10491,11 +23782,91 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIOWD4E3FVSORSZTGU", "PolicyName": "CloudWatchActionsEC2Access", "UpdateDate": "2015-07-07T00:00:33+00:00", "VersionId": "v1" }, + "CloudWatchAgentAdminPolicy": { + "Arn": "arn:aws:iam::aws:policy/CloudWatchAgentAdminPolicy", + "AttachmentCount": 0, + "CreateDate": "2018-03-07T00:52:31+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:PutMetricData", + "ec2:DescribeTags", + "logs:PutLogEvents", + "logs:DescribeLogStreams", + "logs:DescribeLogGroups", + 
"logs:CreateLogStream", + "logs:CreateLogGroup" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ssm:GetParameter", + "ssm:PutParameter" + ], + "Effect": "Allow", + "Resource": "arn:aws:ssm:*:*:parameter/AmazonCloudWatch-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAICMXPKT7EBAF6KR3O", + "PolicyName": "CloudWatchAgentAdminPolicy", + "UpdateDate": "2018-03-07T00:52:31+00:00", + "VersionId": "v1" + }, + "CloudWatchAgentServerPolicy": { + "Arn": "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy", + "AttachmentCount": 0, + "CreateDate": "2018-03-07T01:06:44+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:PutMetricData", + "ec2:DescribeTags", + "logs:PutLogEvents", + "logs:DescribeLogStreams", + "logs:DescribeLogGroups", + "logs:CreateLogStream", + "logs:CreateLogGroup" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ssm:GetParameter" + ], + "Effect": "Allow", + "Resource": "arn:aws:ssm:*:*:parameter/AmazonCloudWatch-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIGOPKN7KRDAKTLG4I", + "PolicyName": "CloudWatchAgentServerPolicy", + "UpdateDate": "2018-03-07T01:06:44+00:00", + "VersionId": "v1" + }, "CloudWatchEventsBuiltInTargetExecutionAccess": { "Arn": "arn:aws:iam::aws:policy/service-role/CloudWatchEventsBuiltInTargetExecutionAccess", "AttachmentCount": 0, @@ -10521,6 +23892,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIC5AQ5DATYSNF4AUM", "PolicyName": "CloudWatchEventsBuiltInTargetExecutionAccess", "UpdateDate": "2016-01-14T18:35:49+00:00", @@ -10551,6 +23923,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJZLOYLNHESMYOJAFU", "PolicyName": "CloudWatchEventsFullAccess", "UpdateDate": "2016-01-14T18:37:08+00:00", @@ -10577,6 +23950,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJJXD6JKJLK2WDLZNO", "PolicyName": "CloudWatchEventsInvocationAccess", "UpdateDate": "2016-01-14T18:36:33+00:00", @@ -10585,7 +23959,7 @@ aws_managed_policies_data = """ "CloudWatchEventsReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/CloudWatchEventsReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-10T17:25:34+00:00", + "CreateDate": "2016-01-14T18:27:18+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -10608,24 +23982,31 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIILJPXXA6F7GYLYBS", "PolicyName": "CloudWatchEventsReadOnlyAccess", "UpdateDate": "2017-08-10T17:25:34+00:00", "VersionId": "v2" }, - "CloudWatchFullAccess": { - "Arn": "arn:aws:iam::aws:policy/CloudWatchFullAccess", + "CloudWatchEventsServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/CloudWatchEventsServiceRolePolicy", "AttachmentCount": 0, - "CreateDate": "2015-02-06T18:40:00+00:00", + "CreateDate": "2017-11-17T00:42:04+00:00", "DefaultVersionId": "v1", "Document": { "Statement": [ { "Action": [ - "autoscaling:Describe*", - 
"cloudwatch:*", - "logs:*", - "sns:*" + "cloudwatch:DescribeAlarms", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeVolumeStatus", + "ec2:DescribeVolumes", + "ec2:RebootInstances", + "ec2:StopInstances", + "ec2:TerminateInstances", + "ec2:CreateSnapshot" ], "Effect": "Allow", "Resource": "*" @@ -10635,11 +24016,54 @@ aws_managed_policies_data = """ }, "IsAttachable": true, "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJNVASSNSIDZIP4X7I", + "PolicyName": "CloudWatchEventsServiceRolePolicy", + "UpdateDate": "2017-11-17T00:42:04+00:00", + "VersionId": "v1" + }, + "CloudWatchFullAccess": { + "Arn": "arn:aws:iam::aws:policy/CloudWatchFullAccess", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:40:00+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "autoscaling:Describe*", + "cloudwatch:*", + "logs:*", + "sns:*", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:GetRole" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "events.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/events.amazonaws.com/AWSServiceRoleForCloudWatchEvents*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIKEABORKUXN6DEAZU", "PolicyName": "CloudWatchFullAccess", - "UpdateDate": "2015-02-06T18:40:00+00:00", - "VersionId": "v1" + "UpdateDate": "2018-08-09T19:10:43+00:00", + "VersionId": "v3" }, "CloudWatchLogsFullAccess": { "Arn": "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess", @@ -10661,6 +24085,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ3ZGNWK2R5HW5BQFO", "PolicyName": "CloudWatchLogsFullAccess", "UpdateDate": "2015-02-06T18:40:02+00:00", @@ -10669,8 +24094,8 @@ aws_managed_policies_data = """ "CloudWatchLogsReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/CloudWatchLogsReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-14T22:22:16+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2015-02-06T18:40:03+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -10678,6 +24103,8 @@ aws_managed_policies_data = """ "logs:Describe*", "logs:Get*", "logs:List*", + "logs:StartQuery", + "logs:StopQuery", "logs:TestMetricFilter", "logs:FilterLogEvents" ], @@ -10690,16 +24117,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ2YIYDYSNNEHK3VKW", "PolicyName": "CloudWatchLogsReadOnlyAccess", - "UpdateDate": "2017-08-14T22:22:16+00:00", - "VersionId": "v3" + "UpdateDate": "2019-01-14T19:32:45+00:00", + "VersionId": "v4" }, "CloudWatchReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/CloudWatchReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2015-02-06T18:40:01+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -10709,8 +24137,10 @@ aws_managed_policies_data = """ "cloudwatch:Get*", "cloudwatch:List*", "logs:Get*", + "logs:List*", "logs:Describe*", "logs:TestMetricFilter", + "logs:FilterLogEvents", "sns:Get*", "sns:List*" ], @@ -10723,16 +24153,353 @@ aws_managed_policies_data = """ "IsAttachable": true, 
"IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJN23PDQP7SZQAE3QE", "PolicyName": "CloudWatchReadOnlyAccess", - "UpdateDate": "2015-02-06T18:40:01+00:00", + "UpdateDate": "2018-05-10T21:40:42+00:00", + "VersionId": "v3" + }, + "CloudwatchApplicationInsightsServiceLinkedRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/CloudwatchApplicationInsightsServiceLinkedRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-12-01T16:22:12+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "cloudwatch:DescribeAlarmHistory", + "cloudwatch:DescribeAlarms", + "cloudwatch:GetMetricData", + "cloudwatch:ListMetrics", + "cloudwatch:PutMetricAlarm", + "cloudwatch:DeleteAlarms" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "logs:GetLogEvents", + "logs:DescribeLogStreams", + "logs:DescribeLogGroups" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "events:DescribeRule" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "cloudFormation:CreateStack", + "cloudFormation:UpdateStack", + "cloudFormation:DeleteStack" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:cloudformation:*:*:stack/ApplicationInsights-*" + ] + }, + { + "Action": [ + "cloudFormation:DescribeStacks", + "cloudFormation:ListStackResources" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "tag:GetResources" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "resource-groups:ListGroupResources", + "resource-groups:GetGroupQuery", + "resource-groups:GetGroup" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ssm:PutParameter", + "ssm:DeleteParameter", + "ssm:AddTagsToResource" + ], + "Effect": "Allow", + "Resource": "arn:aws:ssm:*:*:parameter/AmazonCloudWatch-ApplicationInsights-*" + }, + { + "Action": [ + "ssm:CreateAssociation", + "ssm:UpdateAssociation", + "ssm:DeleteAssociation", + "ssm:DescribeAssociation" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:ec2:*:*:instance/*", + "arn:aws:ssm:*:*:association/*", + "arn:aws:ssm:*:*:managed-instance/*", + "arn:aws:ssm:*:*:document/AWSEC2-ApplicationInsightsCloudwatchAgentInstallAndConfigure", + "arn:aws:ssm:*:*:document/AWS-ConfigureAWSPackage", + "arn:aws:ssm:*:*:document/AmazonCloudWatch-ManageAgent" + ] + }, + { + "Action": [ + "ssm:GetOpsItem", + "ssm:CreateOpsItem", + "ssm:DescribeOpsItems", + "ssm:UpdateOpsItem", + "ssm:DescribeInstanceInformation" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "ec2:DescribeInstances" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJH3SHQERZRQMQOQ44", + "PolicyName": "CloudwatchApplicationInsightsServiceLinkedRolePolicy", + "UpdateDate": "2019-05-24T18:26:41+00:00", + "VersionId": "v3" + }, + "ComprehendDataAccessRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/service-role/ComprehendDataAccessRolePolicy", + "AttachmentCount": 0, + "CreateDate": 
"2019-03-06T22:28:15+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": { + "Action": [ + "s3:GetObject", + "s3:ListBucket", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*Comprehend*", + "arn:aws:s3:::*comprehend*" + ] + }, + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJHSDRRKS2Z3MYUPQY", + "PolicyName": "ComprehendDataAccessRolePolicy", + "UpdateDate": "2019-03-06T22:28:15+00:00", + "VersionId": "v1" + }, + "ComprehendFullAccess": { + "Arn": "arn:aws:iam::aws:policy/ComprehendFullAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T18:08:43+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "comprehend:*", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:GetBucketLocation", + "iam:ListRoles", + "iam:GetRole" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAITBM2PMWNG2P7RZEQ", + "PolicyName": "ComprehendFullAccess", + "UpdateDate": "2017-12-05T01:36:24+00:00", + "VersionId": "v2" + }, + "ComprehendMedicalFullAccess": { + "Arn": "arn:aws:iam::aws:policy/ComprehendMedicalFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-27T17:55:52+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "comprehendmedical:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJR5SUEX6PPJ3K4RAO", + "PolicyName": "ComprehendMedicalFullAccess", + "UpdateDate": "2018-11-27T17:55:52+00:00", + "VersionId": "v1" + }, + "ComprehendReadOnly": { + "Arn": "arn:aws:iam::aws:policy/ComprehendReadOnly", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T18:10:19+00:00", + "DefaultVersionId": "v5", + "Document": { + "Statement": [ + { + "Action": [ + "comprehend:DetectDominantLanguage", + "comprehend:BatchDetectDominantLanguage", + "comprehend:DetectEntities", + "comprehend:BatchDetectEntities", + "comprehend:DetectKeyPhrases", + "comprehend:BatchDetectKeyPhrases", + "comprehend:DetectSentiment", + "comprehend:BatchDetectSentiment", + "comprehend:DetectSyntax", + "comprehend:BatchDetectSyntax", + "comprehend:DescribeTopicsDetectionJob", + "comprehend:ListTopicsDetectionJobs", + "comprehend:DescribeDominantLanguageDetectionJob", + "comprehend:ListDominantLanguageDetectionJobs", + "comprehend:DescribeEntitiesDetectionJob", + "comprehend:ListEntitiesDetectionJobs", + "comprehend:DescribeKeyPhrasesDetectionJob", + "comprehend:ListKeyPhrasesDetectionJobs", + "comprehend:DescribeSentimentDetectionJob", + "comprehend:ListSentimentDetectionJobs", + "comprehend:DescribeDocumentClassifier", + "comprehend:ListDocumentClassifiers", + "comprehend:DescribeDocumentClassificationJob", + "comprehend:ListDocumentClassificationJobs", + "comprehend:DescribeEntityRecognizer", + "comprehend:ListEntityRecognizers" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJIUV5K2YCHQBBAH7G", + "PolicyName": "ComprehendReadOnly", + "UpdateDate": "2018-11-20T01:54:51+00:00", + "VersionId": "v5" + }, + "DAXServiceRolePolicy": { + "Arn": 
"arn:aws:iam::aws:policy/aws-service-role/DAXServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-03-05T17:51:25+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSecurityGroup", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:RevokeSecurityGroupIngress" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJQWMGC67G4DWMREGM", + "PolicyName": "DAXServiceRolePolicy", + "UpdateDate": "2018-03-05T17:51:25+00:00", "VersionId": "v1" }, "DataScientist": { "Arn": "arn:aws:iam::aws:policy/job-function/DataScientist", "AttachmentCount": 0, "CreateDate": "2016-11-10T17:28:48+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v3", "Document": { "Statement": [ { @@ -10785,7 +24552,8 @@ aws_managed_policies_data = """ "s3:CreateBucket", "sns:CreateTopic", "sns:Get*", - "sns:List*" + "sns:List*", + "sagemaker:*" ], "Effect": "Allow", "Resource": "*" @@ -10821,7 +24589,6 @@ aws_managed_policies_data = """ }, { "Action": [ - "iam:GetRole", "iam:PassRole" ], "Effect": "Allow", @@ -10832,6 +24599,18 @@ aws_managed_policies_data = """ "arn:aws:iam::*:role/EMR_DefaultRole", "arn:aws:iam::*:role/kinesis-*" ] + }, + { + "Action": [ + "iam:PassRole" + ], + "Condition": { + "StringEquals": { + "iam:PassedToService": "sagemaker.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" } ], "Version": "2012-10-17" @@ -10839,16 +24618,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/job-function/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ5YHI2BQW7EQFYDXS", "PolicyName": "DataScientist", - "UpdateDate": "2016-11-10T17:28:48+00:00", - "VersionId": "v1" + "UpdateDate": "2019-01-18T19:26:23+00:00", + "VersionId": "v3" }, "DatabaseAdministrator": { "Arn": "arn:aws:iam::aws:policy/job-function/DatabaseAdministrator", "AttachmentCount": 0, "CreateDate": "2016-11-10T17:25:43+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { @@ -10932,7 +24712,6 @@ aws_managed_policies_data = """ }, { "Action": [ - "iam:GetRole", "iam:PassRole" ], "Effect": "Allow", @@ -10952,14 +24731,454 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/job-function/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIGBMAW4VUQKOQNVT6", "PolicyName": "DatabaseAdministrator", - "UpdateDate": "2016-11-10T17:25:43+00:00", + "UpdateDate": "2019-01-08T00:48:02+00:00", + "VersionId": "v2" + }, + "DynamoDBReplicationServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/DynamoDBReplicationServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-11-09T23:55:34+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "dynamodb:GetItem", + "dynamodb:PutItem", + "dynamodb:UpdateItem", + "dynamodb:DeleteItem", + "dynamodb:DescribeTable", + "dynamodb:Scan", + "dynamodb:DescribeStream", + "dynamodb:GetRecords", + "dynamodb:GetShardIterator", + "dynamodb:DescribeTimeToLive", + "application-autoscaling:RegisterScalableTarget", + 
"application-autoscaling:DescribeScalableTargets", + "application-autoscaling:PutScalingPolicy", + "application-autoscaling:DescribeScalingPolicies" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:CreateServiceLinkedRole" + ], + "Condition": { + "StringEquals": { + "iam:AWSServiceName": [ + "dynamodb.application-autoscaling.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJCUNRXL4BWASNJED2", + "PolicyName": "DynamoDBReplicationServiceRolePolicy", + "UpdateDate": "2018-07-02T21:48:12+00:00", + "VersionId": "v3" + }, + "ElastiCacheServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/ElastiCacheServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2017-12-07T17:50:04+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:DeleteNetworkInterface", + "ec2:DeleteSecurityGroup", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:RevokeSecurityGroupIngress" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIML5LIBUZBVCSF7PI", + "PolicyName": "ElastiCacheServiceRolePolicy", + "UpdateDate": "2017-12-07T17:50:04+00:00", "VersionId": "v1" }, + "ElasticLoadBalancingFullAccess": { + "Arn": "arn:aws:iam::aws:policy/ElasticLoadBalancingFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-09-20T20:42:07+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": "elasticloadbalancing:*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "ec2:DescribeVpcClassicLink", + "ec2:DescribeInstances", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeClassicLinkInstances", + "ec2:DescribeRouteTables", + "cognito-idp:DescribeUserPoolClient" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringEquals": { + "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIDPMLA3IUIOQCISJ4", + "PolicyName": "ElasticLoadBalancingFullAccess", + "UpdateDate": "2019-03-25T21:33:12+00:00", + "VersionId": "v4" + }, + "ElasticLoadBalancingReadOnly": { + "Arn": "arn:aws:iam::aws:policy/ElasticLoadBalancingReadOnly", + "AttachmentCount": 0, + "CreateDate": "2018-09-20T20:17:09+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": "elasticloadbalancing:Describe*", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:DescribeInstances", + "ec2:DescribeClassicLinkInstances", + "ec2:DescribeSecurityGroups" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + 
"IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJMO7B7SNFLQ6HH736", + "PolicyName": "ElasticLoadBalancingReadOnly", + "UpdateDate": "2018-09-20T20:17:09+00:00", + "VersionId": "v1" + }, + "FMSServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/FMSServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-03-28T23:01:12+00:00", + "DefaultVersionId": "v7", + "Document": { + "Statement": [ + { + "Action": [ + "waf:UpdateWebACL", + "waf:DeleteWebACL", + "waf:GetWebACL", + "waf:GetRuleGroup", + "waf:ListSubscribedRuleGroups", + "waf-regional:UpdateWebACL", + "waf-regional:DeleteWebACL", + "waf-regional:GetWebACL", + "waf-regional:GetRuleGroup", + "waf-regional:ListSubscribedRuleGroups", + "waf-regional:ListResourcesForWebACL", + "waf-regional:AssociateWebACL", + "waf-regional:DisassociateWebACL", + "elasticloadbalancing:SetWebACL", + "apigateway:SetWebACL" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:waf:*:*:webacl/*", + "arn:aws:waf-regional:*:*:webacl/*", + "arn:aws:waf:*:*:rulegroup/*", + "arn:aws:waf-regional:*:*:rulegroup/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*", + "arn:aws:apigateway:*::/restapis/*/stages/*" + ] + }, + { + "Action": [ + "waf:CreateWebACL", + "waf-regional:CreateWebACL", + "waf:GetChangeToken", + "waf-regional:GetChangeToken" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:waf:*", + "arn:aws:waf-regional:*" + ] + }, + { + "Action": [ + "waf:PutPermissionPolicy", + "waf:GetPermissionPolicy", + "waf:DeletePermissionPolicy", + "waf-regional:PutPermissionPolicy", + "waf-regional:GetPermissionPolicy", + "waf-regional:DeletePermissionPolicy" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:waf:*:*:webacl/*", + "arn:aws:waf:*:*:rulegroup/*", + "arn:aws:waf-regional:*:*:webacl/*", + "arn:aws:waf-regional:*:*:rulegroup/*" + ] + }, + { + "Action": [ + "cloudfront:GetDistribution", + "cloudfront:UpdateDistribution", + "cloudfront:ListDistributionsByWebACLId" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "config:DeleteConfigRule", + "config:DescribeComplianceByConfigRule", + "config:DescribeConfigRuleEvaluationStatus", + "config:DescribeConfigRules", + "config:GetComplianceDetailsByConfigRule", + "config:PutConfigRule", + "config:StartConfigRulesEvaluation" + ], + "Effect": "Allow", + "Resource": "arn:aws:config:*:*:config-rule/aws-service-rule/fms.amazonaws.com/*" + }, + { + "Action": [ + "config:DescribeConfigurationRecorders", + "config:DescribeConfigurationRecorderStatus", + "config:PutConfigurationRecorder", + "config:StartConfigurationRecorder", + "config:PutDeliveryChannel", + "config:DescribeDeliveryChannels", + "config:DescribeDeliveryChannelStatus", + "config:GetComplianceSummaryByConfigRule", + "config:GetDiscoveredResourceCounts" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:iam::*:role/aws-service-role/fms.amazonaws.com/AWSServiceRoleForFMS" + ] + }, + { + "Action": [ + "organizations:DescribeAccount", + "organizations:DescribeOrganization", + "organizations:ListAccounts" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "shield:CreateProtection", + "shield:DeleteProtection", + "shield:DescribeProtection", + "shield:ListProtections", + "shield:ListAttacks", + "shield:CreateSubscription", + "shield:DescribeSubscription", + 
"shield:GetSubscriptionState", + "shield:DescribeDRTAccess", + "shield:DescribeEmergencyContactSettings", + "shield:UpdateEmergencyContactSettings", + "elasticloadbalancing:DescribeLoadBalancers", + "ec2:DescribeAddresses" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI62NTGYJB446ACUEA", + "PolicyName": "FMSServiceRolePolicy", + "UpdateDate": "2019-03-08T18:02:51+00:00", + "VersionId": "v7" + }, + "FSxDeleteServiceLinkedRoleAccess": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/FSxDeleteServiceLinkedRoleAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-28T10:40:24+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus", + "iam:GetRole" + ], + "Effect": "Allow", + "Resource": "arn:*:iam::*:role/aws-service-role/s3.data-source.lustre.fsx.amazonaws.com/AWSServiceRoleForFSxS3Access_*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ6IRP2YV2YPKWPPNQ", + "PolicyName": "FSxDeleteServiceLinkedRoleAccess", + "UpdateDate": "2018-11-28T10:40:24+00:00", + "VersionId": "v1" + }, + "GlobalAcceleratorFullAccess": { + "Arn": "arn:aws:iam::aws:policy/GlobalAcceleratorFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-27T02:44:44+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "globalaccelerator:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ3NSRQKPB42BCNRT6", + "PolicyName": "GlobalAcceleratorFullAccess", + "UpdateDate": "2018-11-27T02:44:44+00:00", + "VersionId": "v1" + }, + "GlobalAcceleratorReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/GlobalAcceleratorReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-27T02:41:00+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "globalaccelerator:Describe*", + "globalaccelerator:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJYXHGCVENJKQZRNGU", + "PolicyName": "GlobalAcceleratorReadOnlyAccess", + "UpdateDate": "2018-11-27T02:41:00+00:00", + "VersionId": "v1" + }, + "GreengrassOTAUpdateArtifactAccess": { + "Arn": "arn:aws:iam::aws:policy/service-role/GreengrassOTAUpdateArtifactAccess", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T18:11:47+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::*-greengrass-updates/*" + ], + "Sid": "AllowsIotToAccessGreengrassOTAUpdateArtifacts" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIFGE66SKIK3GW5UC2", + "PolicyName": "GreengrassOTAUpdateArtifactAccess", + "UpdateDate": "2018-12-18T00:59:43+00:00", + "VersionId": "v2" + }, "IAMFullAccess": { "Arn": "arn:aws:iam::aws:policy/IAMFullAccess", - "AttachmentCount": 2, + 
"AttachmentCount": 0, "CreateDate": "2015-02-06T18:40:38+00:00", "DefaultVersionId": "v1", "Document": { @@ -10975,6 +25194,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI7XKCFMBPM3QQRRVQ", "PolicyName": "IAMFullAccess", "UpdateDate": "2015-02-06T18:40:38+00:00", @@ -10983,8 +25203,8 @@ aws_managed_policies_data = """ "IAMReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/IAMReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2016-09-06T17:06:37+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2015-02-06T18:40:39+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -10992,7 +25212,9 @@ aws_managed_policies_data = """ "iam:GenerateCredentialReport", "iam:GenerateServiceLastAccessedDetails", "iam:Get*", - "iam:List*" + "iam:List*", + "iam:SimulateCustomPolicy", + "iam:SimulatePrincipalPolicy" ], "Effect": "Allow", "Resource": "*" @@ -11003,10 +25225,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJKSO7NDY4T57MWDSQ", "PolicyName": "IAMReadOnlyAccess", - "UpdateDate": "2016-09-06T17:06:37+00:00", - "VersionId": "v3" + "UpdateDate": "2018-01-25T19:11:27+00:00", + "VersionId": "v4" }, "IAMSelfManageServiceSpecificCredentials": { "Arn": "arn:aws:iam::aws:policy/IAMSelfManageServiceSpecificCredentials", @@ -11032,6 +25255,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI4VT74EMXK2PMQJM2", "PolicyName": "IAMSelfManageServiceSpecificCredentials", "UpdateDate": "2016-12-22T17:25:18+00:00", @@ -11040,7 +25264,7 @@ aws_managed_policies_data = """ "IAMUserChangePassword": { "Arn": "arn:aws:iam::aws:policy/IAMUserChangePassword", "AttachmentCount": 1, - "CreateDate": "2016-11-15T23:18:55+00:00", + "CreateDate": "2016-11-15T00:25:16+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -11066,6 +25290,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ4L4MM2A7QIEB56MS", "PolicyName": "IAMUserChangePassword", "UpdateDate": "2016-11-15T23:18:55+00:00", @@ -11073,7 +25298,7 @@ aws_managed_policies_data = """ }, "IAMUserSSHKeys": { "Arn": "arn:aws:iam::aws:policy/IAMUserSSHKeys", - "AttachmentCount": 1, + "AttachmentCount": 0, "CreateDate": "2015-07-09T17:08:54+00:00", "DefaultVersionId": "v1", "Document": { @@ -11095,38 +25320,577 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJTSHUA4UXGXU7ANUA", "PolicyName": "IAMUserSSHKeys", "UpdateDate": "2015-07-09T17:08:54+00:00", "VersionId": "v1" }, - "NetworkAdministrator": { - "Arn": "arn:aws:iam::aws:policy/job-function/NetworkAdministrator", + "KafkaServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/KafkaServiceRolePolicy", "AttachmentCount": 0, - "CreateDate": "2017-03-20T18:44:58+00:00", + "CreateDate": "2018-11-15T23:31:48+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ - "autoscaling:Describe*", + "ec2:CreateNetworkInterface", + "ec2:DescribeNetworkInterfaces", + "ec2:CreateNetworkInterfacePermission", + "ec2:AttachNetworkInterface", + "ec2:DeleteNetworkInterface", + "ec2:DetachNetworkInterface", + "acm-pca:GetCertificateAuthorityCertificate" + ], + 
"Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJUXPRZ76MAP2EVQJU", + "PolicyName": "KafkaServiceRolePolicy", + "UpdateDate": "2019-05-23T19:58:58+00:00", + "VersionId": "v2" + }, + "LexBotPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/LexBotPolicy", + "AttachmentCount": 0, + "CreateDate": "2017-02-17T22:18:13+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "polly:SynthesizeSpeech" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJJ3NZRBBQKSESXXJC", + "PolicyName": "LexBotPolicy", + "UpdateDate": "2017-02-17T22:18:13+00:00", + "VersionId": "v1" + }, + "LexChannelPolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/LexChannelPolicy", + "AttachmentCount": 0, + "CreateDate": "2017-02-17T23:23:24+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "lex:PostText" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJKYEISPO63JTBJWPY", + "PolicyName": "LexChannelPolicy", + "UpdateDate": "2017-02-17T23:23:24+00:00", + "VersionId": "v1" + }, + "LightsailExportAccess": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/LightsailExportAccess", + "AttachmentCount": 0, + "CreateDate": "2018-09-28T16:35:54+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "iam:DeleteServiceLinkedRole", + "iam:GetServiceLinkedRoleDeletionStatus" + ], + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/lightsail.amazonaws.com/AWSServiceRoleForLightsail*" + }, + { + "Action": [ + "ec2:CopySnapshot", + "ec2:DescribeSnapshots", + "ec2:CopyImage", + "ec2:DescribeImages" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ4LZGPQLZWMVR4WMQ", + "PolicyName": "LightsailExportAccess", + "UpdateDate": "2018-09-28T16:35:54+00:00", + "VersionId": "v1" + }, + "NeptuneConsoleFullAccess": { + "Arn": "arn:aws:iam::aws:policy/NeptuneConsoleFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-06-19T21:35:19+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "rds:CreateDBCluster", + "rds:CreateDBInstance" + ], + "Condition": { + "StringEquals": { + "rds:DatabaseEngine": "graphdb" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:rds:*" + ] + }, + { + "Action": [ + "rds:AddRoleToDBCluster", + "rds:AddSourceIdentifierToSubscription", + "rds:AddTagsToResource", + "rds:ApplyPendingMaintenanceAction", + "rds:CopyDBClusterParameterGroup", + "rds:CopyDBClusterSnapshot", + "rds:CopyDBParameterGroup", + "rds:CreateDBClusterParameterGroup", + "rds:CreateDBClusterSnapshot", + "rds:CreateDBParameterGroup", + "rds:CreateDBSubnetGroup", + "rds:CreateEventSubscription", + "rds:DeleteDBCluster", + "rds:DeleteDBClusterParameterGroup", + "rds:DeleteDBClusterSnapshot", + "rds:DeleteDBInstance", + "rds:DeleteDBParameterGroup", + "rds:DeleteDBSubnetGroup", + 
"rds:DeleteEventSubscription", + "rds:DescribeAccountAttributes", + "rds:DescribeCertificates", + "rds:DescribeDBClusterParameterGroups", + "rds:DescribeDBClusterParameters", + "rds:DescribeDBClusterSnapshotAttributes", + "rds:DescribeDBClusterSnapshots", + "rds:DescribeDBClusters", + "rds:DescribeDBEngineVersions", + "rds:DescribeDBInstances", + "rds:DescribeDBLogFiles", + "rds:DescribeDBParameterGroups", + "rds:DescribeDBParameters", + "rds:DescribeDBSecurityGroups", + "rds:DescribeDBSubnetGroups", + "rds:DescribeEngineDefaultClusterParameters", + "rds:DescribeEngineDefaultParameters", + "rds:DescribeEventCategories", + "rds:DescribeEventSubscriptions", + "rds:DescribeEvents", + "rds:DescribeOptionGroups", + "rds:DescribeOrderableDBInstanceOptions", + "rds:DescribePendingMaintenanceActions", + "rds:DescribeValidDBInstanceModifications", + "rds:DownloadDBLogFilePortion", + "rds:FailoverDBCluster", + "rds:ListTagsForResource", + "rds:ModifyDBCluster", + "rds:ModifyDBClusterParameterGroup", + "rds:ModifyDBClusterSnapshotAttribute", + "rds:ModifyDBInstance", + "rds:ModifyDBParameterGroup", + "rds:ModifyDBSubnetGroup", + "rds:ModifyEventSubscription", + "rds:PromoteReadReplicaDBCluster", + "rds:RebootDBInstance", + "rds:RemoveRoleFromDBCluster", + "rds:RemoveSourceIdentifierFromSubscription", + "rds:RemoveTagsFromResource", + "rds:ResetDBClusterParameterGroup", + "rds:ResetDBParameterGroup", + "rds:RestoreDBClusterFromSnapshot", + "rds:RestoreDBClusterToPointInTime" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", "ec2:AllocateAddress", + "ec2:AssignIpv6Addresses", + "ec2:AssignPrivateIpAddresses", + "ec2:AssociateAddress", + "ec2:AssociateRouteTable", + "ec2:AssociateSubnetCidrBlock", + "ec2:AssociateVpcCidrBlock", + "ec2:AttachInternetGateway", + "ec2:AttachNetworkInterface", + "ec2:CreateCustomerGateway", + "ec2:CreateDefaultSubnet", + "ec2:CreateDefaultVpc", + "ec2:CreateInternetGateway", + "ec2:CreateNatGateway", + "ec2:CreateNetworkInterface", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateVpc", + "ec2:CreateVpcEndpoint", + "ec2:CreateVpcEndpoint", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeCustomerGateways", + "ec2:DescribeInstances", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribePrefixLists", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroupReferences", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcs", + "ec2:DescribeVpcs", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifySubnetAttribute", + "ec2:ModifyVpcAttribute", + "ec2:ModifyVpcEndpoint", + "iam:ListRoles", + "iam:PassRole", + "kms:ListAliases", + "kms:ListKeyPolicies", + "kms:ListKeys", + "kms:ListRetirableGrants", + "logs:DescribeLogStreams", + "logs:GetLogEvents", + "sns:ListSubscriptions", + "sns:ListTopics", + "sns:Publish" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:CreateServiceLinkedRole", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "rds.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": 
"arn:aws:iam::*:role/aws-service-role/rds.amazonaws.com/AWSServiceRoleForRDS" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJWTD4ELX2KRNICUVQ", + "PolicyName": "NeptuneConsoleFullAccess", + "UpdateDate": "2018-11-06T21:19:54+00:00", + "VersionId": "v2" + }, + "NeptuneFullAccess": { + "Arn": "arn:aws:iam::aws:policy/NeptuneFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-05-30T19:17:31+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "rds:CreateDBCluster", + "rds:CreateDBInstance" + ], + "Condition": { + "StringEquals": { + "rds:DatabaseEngine": "graphdb" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws:rds:*" + ] + }, + { + "Action": [ + "rds:AddRoleToDBCluster", + "rds:AddSourceIdentifierToSubscription", + "rds:AddTagsToResource", + "rds:ApplyPendingMaintenanceAction", + "rds:CopyDBClusterParameterGroup", + "rds:CopyDBClusterSnapshot", + "rds:CopyDBParameterGroup", + "rds:CreateDBClusterParameterGroup", + "rds:CreateDBClusterSnapshot", + "rds:CreateDBParameterGroup", + "rds:CreateDBSubnetGroup", + "rds:CreateEventSubscription", + "rds:DeleteDBCluster", + "rds:DeleteDBClusterParameterGroup", + "rds:DeleteDBClusterSnapshot", + "rds:DeleteDBInstance", + "rds:DeleteDBParameterGroup", + "rds:DeleteDBSubnetGroup", + "rds:DeleteEventSubscription", + "rds:DescribeAccountAttributes", + "rds:DescribeCertificates", + "rds:DescribeDBClusterParameterGroups", + "rds:DescribeDBClusterParameters", + "rds:DescribeDBClusterSnapshotAttributes", + "rds:DescribeDBClusterSnapshots", + "rds:DescribeDBClusters", + "rds:DescribeDBEngineVersions", + "rds:DescribeDBInstances", + "rds:DescribeDBLogFiles", + "rds:DescribeDBParameterGroups", + "rds:DescribeDBParameters", + "rds:DescribeDBSecurityGroups", + "rds:DescribeDBSubnetGroups", + "rds:DescribeEngineDefaultClusterParameters", + "rds:DescribeEngineDefaultParameters", + "rds:DescribeEventCategories", + "rds:DescribeEventSubscriptions", + "rds:DescribeEvents", + "rds:DescribeOptionGroups", + "rds:DescribeOrderableDBInstanceOptions", + "rds:DescribePendingMaintenanceActions", + "rds:DescribeValidDBInstanceModifications", + "rds:DownloadDBLogFilePortion", + "rds:FailoverDBCluster", + "rds:ListTagsForResource", + "rds:ModifyDBCluster", + "rds:ModifyDBClusterParameterGroup", + "rds:ModifyDBClusterSnapshotAttribute", + "rds:ModifyDBInstance", + "rds:ModifyDBParameterGroup", + "rds:ModifyDBSubnetGroup", + "rds:ModifyEventSubscription", + "rds:PromoteReadReplicaDBCluster", + "rds:RebootDBInstance", + "rds:RemoveRoleFromDBCluster", + "rds:RemoveSourceIdentifierFromSubscription", + "rds:RemoveTagsFromResource", + "rds:ResetDBClusterParameterGroup", + "rds:ResetDBParameterGroup", + "rds:RestoreDBClusterFromSnapshot", + "rds:RestoreDBClusterToPointInTime" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics", + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "iam:PassRole", + "kms:ListAliases", + "kms:ListKeyPolicies", + "kms:ListKeys", + "kms:ListRetirableGrants", + "logs:DescribeLogStreams", + "logs:GetLogEvents", + "sns:ListSubscriptions", + "sns:ListTopics", + "sns:Publish" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "iam:CreateServiceLinkedRole", + 
"Condition": { + "StringLike": { + "iam:AWSServiceName": "rds.amazonaws.com" + } + }, + "Effect": "Allow", + "Resource": "arn:aws:iam::*:role/aws-service-role/rds.amazonaws.com/AWSServiceRoleForRDS" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIXSDEYRCNJRC6ITFK", + "PolicyName": "NeptuneFullAccess", + "UpdateDate": "2018-11-06T21:21:19+00:00", + "VersionId": "v3" + }, + "NeptuneReadOnlyAccess": { + "Arn": "arn:aws:iam::aws:policy/NeptuneReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-05-30T19:16:37+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "rds:DescribeAccountAttributes", + "rds:DescribeCertificates", + "rds:DescribeDBClusterParameterGroups", + "rds:DescribeDBClusterParameters", + "rds:DescribeDBClusterSnapshotAttributes", + "rds:DescribeDBClusterSnapshots", + "rds:DescribeDBClusters", + "rds:DescribeDBEngineVersions", + "rds:DescribeDBInstances", + "rds:DescribeDBLogFiles", + "rds:DescribeDBParameterGroups", + "rds:DescribeDBParameters", + "rds:DescribeDBSubnetGroups", + "rds:DescribeEventCategories", + "rds:DescribeEventSubscriptions", + "rds:DescribeEvents", + "rds:DescribeOrderableDBInstanceOptions", + "rds:DescribePendingMaintenanceActions", + "rds:DownloadDBLogFilePortion", + "rds:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:DescribeAccountAttributes", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInternetGateways", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "kms:ListKeys", + "kms:ListRetirableGrants", + "kms:ListAliases", + "kms:ListKeyPolicies" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "logs:DescribeLogStreams", + "logs:GetLogEvents" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:logs:*:*:log-group:/aws/rds/*:log-stream:*", + "arn:aws:logs:*:*:log-group:/aws/neptune/*:log-stream:*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJS5OQ5RXULC66WTGQ", + "PolicyName": "NeptuneReadOnlyAccess", + "UpdateDate": "2018-05-30T19:16:37+00:00", + "VersionId": "v1" + }, + "NetworkAdministrator": { + "Arn": "arn:aws:iam::aws:policy/job-function/NetworkAdministrator", + "AttachmentCount": 0, + "CreateDate": "2016-11-10T17:31:35+00:00", + "DefaultVersionId": "v3", + "Document": { + "Statement": [ + { + "Action": [ + "autoscaling:Describe*", + "ec2:AcceptVpcEndpointConnections", + "ec2:AllocateAddress", + "ec2:AssignIpv6Addresses", "ec2:AssignPrivateIpAddresses", "ec2:AssociateAddress", "ec2:AssociateDhcpOptions", "ec2:AssociateRouteTable", + "ec2:AssociateSubnetCidrBlock", + "ec2:AssociateVpcCidrBlock", "ec2:AttachInternetGateway", "ec2:AttachNetworkInterface", "ec2:AttachVpnGateway", "ec2:CreateCustomerGateway", + "ec2:CreateDefaultSubnet", + "ec2:CreateDefaultVpc", "ec2:CreateDhcpOptions", + "ec2:CreateEgressOnlyInternetGateway", "ec2:CreateFlowLogs", "ec2:CreateInternetGateway", "ec2:CreateNatGateway", "ec2:CreateNetworkAcl", - "ec2:CreateNetworkAcl", "ec2:CreateNetworkAclEntry", "ec2:CreateNetworkInterface", + "ec2:CreateNetworkInterfacePermission", + 
"ec2:CreatePlacementGroup", "ec2:CreateRoute", "ec2:CreateRouteTable", "ec2:CreateSecurityGroup", @@ -11134,28 +25898,33 @@ aws_managed_policies_data = """ "ec2:CreateTags", "ec2:CreateVpc", "ec2:CreateVpcEndpoint", + "ec2:CreateVpcEndpointConnectionNotification", + "ec2:CreateVpcEndpointServiceConfiguration", "ec2:CreateVpnConnection", "ec2:CreateVpnConnectionRoute", "ec2:CreateVpnGateway", - "ec2:CreatePlacementGroup", - "ec2:DeletePlacementGroup", - "ec2:DescribePlacementGroups", + "ec2:DeleteEgressOnlyInternetGateway", "ec2:DeleteFlowLogs", "ec2:DeleteNatGateway", "ec2:DeleteNetworkInterface", + "ec2:DeleteNetworkInterfacePermission", + "ec2:DeletePlacementGroup", "ec2:DeleteSubnet", "ec2:DeleteTags", "ec2:DeleteVpc", + "ec2:DeleteVpcEndpointConnectionNotifications", "ec2:DeleteVpcEndpoints", + "ec2:DeleteVpcEndpointServiceConfigurations", "ec2:DeleteVpnConnection", "ec2:DeleteVpnConnectionRoute", "ec2:DeleteVpnGateway", + "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", "ec2:DescribeClassicLinkInstances", "ec2:DescribeCustomerGateways", - "ec2:DescribeVpcClassicLinkDnsSupport", "ec2:DescribeDhcpOptions", + "ec2:DescribeEgressOnlyInternetGateways", "ec2:DescribeFlowLogs", "ec2:DescribeInstances", "ec2:DescribeInternetGateways", @@ -11164,15 +25933,24 @@ aws_managed_policies_data = """ "ec2:DescribeNatGateways", "ec2:DescribeNetworkAcls", "ec2:DescribeNetworkInterfaceAttribute", + "ec2:DescribeNetworkInterfacePermissions", "ec2:DescribeNetworkInterfaces", + "ec2:DescribePlacementGroups", "ec2:DescribePrefixLists", "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroupReferences", "ec2:DescribeSecurityGroups", + "ec2:DescribeStaleSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcAttribute", "ec2:DescribeVpcClassicLink", + "ec2:DescribeVpcClassicLinkDnsSupport", + "ec2:DescribeVpcEndpointConnectionNotifications", + "ec2:DescribeVpcEndpointConnections", "ec2:DescribeVpcEndpoints", + "ec2:DescribeVpcEndpointServiceConfigurations", + "ec2:DescribeVpcEndpointServicePermissions", "ec2:DescribeVpcEndpointServices", "ec2:DescribeVpcPeeringConnections", "ec2:DescribeVpcs", @@ -11182,14 +25960,24 @@ aws_managed_policies_data = """ "ec2:DetachNetworkInterface", "ec2:DetachVpnGateway", "ec2:DisableVgwRoutePropagation", + "ec2:DisableVpcClassicLinkDnsSupport", "ec2:DisassociateAddress", "ec2:DisassociateRouteTable", + "ec2:DisassociateSubnetCidrBlock", + "ec2:DisassociateVpcCidrBlock", "ec2:EnableVgwRoutePropagation", + "ec2:EnableVpcClassicLinkDnsSupport", "ec2:ModifyNetworkInterfaceAttribute", "ec2:ModifySubnetAttribute", "ec2:ModifyVpcAttribute", "ec2:ModifyVpcEndpoint", + "ec2:ModifyVpcEndpointConnectionNotification", + "ec2:ModifyVpcEndpointServiceConfiguration", + "ec2:ModifyVpcEndpointServicePermissions", + "ec2:ModifyVpcPeeringConnectionOptions", + "ec2:ModifyVpcTenancy", "ec2:MoveAddressToVpc", + "ec2:RejectVpcEndpointConnections", "ec2:ReleaseAddress", "ec2:ReplaceNetworkAclAssociation", "ec2:ReplaceNetworkAclEntry", @@ -11197,7 +25985,10 @@ aws_managed_policies_data = """ "ec2:ReplaceRouteTableAssociation", "ec2:ResetNetworkInterfaceAttribute", "ec2:RestoreAddressToClassic", + "ec2:UnassignIpv6Addresses", "ec2:UnassignPrivateIpAddresses", + "ec2:UpdateSecurityGroupRuleDescriptionsEgress", + "ec2:UpdateSecurityGroupRuleDescriptionsIngress", "directconnect:*", "route53:*", "route53domains:*", @@ -11277,28 +26068,36 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/job-function/", + 
"PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJPNMADZFJCVPJVZA2", "PolicyName": "NetworkAdministrator", - "UpdateDate": "2017-03-20T18:44:58+00:00", - "VersionId": "v2" + "UpdateDate": "2018-12-13T19:43:41+00:00", + "VersionId": "v3" }, "PowerUserAccess": { "Arn": "arn:aws:iam::aws:policy/PowerUserAccess", "AttachmentCount": 0, - "CreateDate": "2016-12-06T18:11:16+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2015-02-06T18:39:47+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { "Effect": "Allow", "NotAction": [ "iam:*", - "organizations:*" + "organizations:*", + "account:*" ], "Resource": "*" }, { - "Action": "organizations:DescribeOrganization", + "Action": [ + "iam:CreateServiceLinkedRole", + "iam:DeleteServiceLinkedRole", + "iam:ListRoles", + "organizations:DescribeOrganization", + "account:ListRegions" + ], "Effect": "Allow", "Resource": "*" } @@ -11308,15 +26107,16 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJYRXTHIB4FOVS3ZXS", "PolicyName": "PowerUserAccess", - "UpdateDate": "2016-12-06T18:11:16+00:00", - "VersionId": "v2" + "UpdateDate": "2019-03-20T22:19:03+00:00", + "VersionId": "v4" }, "QuickSightAccessForS3StorageManagementAnalyticsReadOnly": { "Arn": "arn:aws:iam::aws:policy/service-role/QuickSightAccessForS3StorageManagementAnalyticsReadOnly", "AttachmentCount": 0, - "CreateDate": "2017-07-21T00:02:14+00:00", + "CreateDate": "2017-06-12T18:18:38+00:00", "DefaultVersionId": "v3", "Document": { "Statement": [ @@ -11345,6 +26145,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIFWG3L3WDMR4I7ZJW", "PolicyName": "QuickSightAccessForS3StorageManagementAnalyticsReadOnly", "UpdateDate": "2017-07-21T00:02:14+00:00", @@ -11377,6 +26178,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIWKFXRLQG2ROKKXLE", "PolicyName": "RDSCloudHsmAuthorizationRole", "UpdateDate": "2015-02-06T18:41:29+00:00", @@ -11385,31 +26187,55 @@ aws_managed_policies_data = """ "ReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/ReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-07-20T17:43:06+00:00", - "DefaultVersionId": "v29", + "CreateDate": "2015-02-06T18:39:48+00:00", + "DefaultVersionId": "v50", "Document": { "Statement": [ { "Action": [ + "a4b:Get*", + "a4b:List*", + "a4b:Describe*", + "a4b:Search*", "acm:Describe*", "acm:Get*", "acm:List*", + "acm-pca:Describe*", + "acm-pca:Get*", + "acm-pca:List*", + "amplify:GetApp", + "amplify:GetBranch", + "amplify:GetJob", + "amplify:GetDomainAssociation", + "amplify:ListApps", + "amplify:ListBranches", + "amplify:ListDomainAssociations", + "amplify:ListJobs", "apigateway:GET", "application-autoscaling:Describe*", + "appmesh:Describe*", + "appmesh:List*", "appstream:Describe*", "appstream:Get*", "appstream:List*", + "appsync:Get*", + "appsync:List*", + "autoscaling:Describe*", + "autoscaling-plans:Describe*", + "autoscaling-plans:GetScalingPlanResourceForecastData", "athena:List*", "athena:Batch*", "athena:Get*", - "autoscaling:Describe*", "batch:List*", "batch:Describe*", + "cloud9:Describe*", + "cloud9:List*", "clouddirectory:List*", "clouddirectory:BatchRead", "clouddirectory:Get*", "clouddirectory:LookupPolicy", "cloudformation:Describe*", + "cloudformation:Detect*", "cloudformation:Get*", 
"cloudformation:List*", "cloudformation:Estimate*", @@ -11431,6 +26257,7 @@ aws_managed_policies_data = """ "codebuild:BatchGet*", "codebuild:List*", "codecommit:BatchGet*", + "codecommit:Describe*", "codecommit:Get*", "codecommit:GitPull", "codecommit:List*", @@ -11443,13 +26270,15 @@ aws_managed_policies_data = """ "codestar:Describe*", "codestar:Get*", "codestar:Verify*", - "cognito-identity:List*", "cognito-identity:Describe*", + "cognito-identity:Get*", + "cognito-identity:List*", "cognito-identity:Lookup*", "cognito-sync:List*", "cognito-sync:Describe*", "cognito-sync:Get*", "cognito-sync:QueryRecords", + "cognito-idp:AdminGet*", "cognito-idp:AdminList*", "cognito-idp:List*", "cognito-idp:Describe*", @@ -11460,20 +26289,28 @@ aws_managed_policies_data = """ "config:List*", "connect:List*", "connect:Describe*", - "connect:Get*", + "connect:GetFederationToken", + "datasync:Describe*", + "datasync:List*", "datapipeline:Describe*", "datapipeline:EvaluateExpression", "datapipeline:Get*", "datapipeline:List*", "datapipeline:QueryObjects", "datapipeline:Validate*", + "dax:BatchGetItem", + "dax:Describe*", + "dax:GetItem", + "dax:ListTags", + "dax:Query", + "dax:Scan", "directconnect:Describe*", - "directconnect:Confirm*", "devicefarm:List*", "devicefarm:Get*", "discovery:Describe*", "discovery:List*", "discovery:Get*", + "dlm:Get*", "dms:Describe*", "dms:List*", "dms:Test*", @@ -11490,6 +26327,7 @@ aws_managed_policies_data = """ "dynamodb:Scan", "ec2:Describe*", "ec2:Get*", + "ec2:SearchTransitGatewayRoutes", "ec2messages:Get*", "ecr:BatchCheck*", "ecr:BatchGet*", @@ -11498,6 +26336,10 @@ aws_managed_policies_data = """ "ecr:List*", "ecs:Describe*", "ecs:List*", + "eks:DescribeCluster", + "eks:DescribeUpdates", + "eks:ListClusters", + "eks:ListUpdates", "elasticache:Describe*", "elasticache:List*", "elasticbeanstalk:Check*", @@ -11515,6 +26357,7 @@ aws_managed_policies_data = """ "elastictranscoder:Read*", "es:Describe*", "es:List*", + "es:Get*", "es:ESHttpGet", "es:ESHttpHead", "events:Describe*", @@ -11522,6 +26365,8 @@ aws_managed_policies_data = """ "events:Test*", "firehose:Describe*", "firehose:List*", + "fsx:Describe*", + "fsx:List*", "gamelift:List*", "gamelift:Get*", "gamelift:Describe*", @@ -11531,6 +26376,45 @@ aws_managed_policies_data = """ "glacier:List*", "glacier:Describe*", "glacier:Get*", + "globalaccelerator:Describe*", + "globalaccelerator:List*", + "glue:BatchGetPartition", + "glue:GetCatalogImportStatus", + "glue:GetClassifier", + "glue:GetClassifiers", + "glue:GetCrawler", + "glue:GetCrawlers", + "glue:GetCrawlerMetrics", + "glue:GetDatabase", + "glue:GetDatabases", + "glue:GetDataCatalogEncryptionSettings", + "glue:GetDataflowGraph", + "glue:GetDevEndpoint", + "glue:GetDevEndpoints", + "glue:GetJob", + "glue:GetJobs", + "glue:GetJobRun", + "glue:GetJobRuns", + "glue:GetMapping", + "glue:GetPartition", + "glue:GetPartitions", + "glue:GetPlan", + "glue:GetResourcePolicy", + "glue:GetSecurityConfiguration", + "glue:GetSecurityConfigurations", + "glue:GetTable", + "glue:GetTables", + "glue:GetTableVersion", + "glue:GetTableVersions", + "glue:GetTags", + "glue:GetTrigger", + "glue:GetTriggers", + "glue:GetUserDefinedFunction", + "glue:GetUserDefinedFunctions", + "greengrass:Get*", + "greengrass:List*", + "guardduty:Get*", + "guardduty:List*", "health:Describe*", "health:Get*", "health:List*", @@ -11548,10 +26432,20 @@ aws_managed_policies_data = """ "iot:Describe*", "iot:Get*", "iot:List*", + "iotanalytics:Describe*", + "iotanalytics:List*", + "iotanalytics:Get*", + 
"iotanalytics:SampleChannelData", + "kafka:Describe*", + "kafka:List*", + "kafka:Get*", "kinesisanalytics:Describe*", "kinesisanalytics:Discover*", "kinesisanalytics:Get*", "kinesisanalytics:List*", + "kinesisvideo:Describe*", + "kinesisvideo:Get*", + "kinesisvideo:List*", "kinesis:Describe*", "kinesis:Get*", "kinesis:List*", @@ -11561,27 +26455,80 @@ aws_managed_policies_data = """ "lambda:List*", "lambda:Get*", "lex:Get*", - "lightsail:Get*", + "lightsail:GetActiveNames", + "lightsail:GetBlueprints", + "lightsail:GetBundles", + "lightsail:GetCloudFormationStackRecords", + "lightsail:GetDisk", + "lightsail:GetDisks", + "lightsail:GetDiskSnapshot", + "lightsail:GetDiskSnapshots", + "lightsail:GetDomain", + "lightsail:GetDomains", + "lightsail:GetExportSnapshotRecords", + "lightsail:GetInstance", + "lightsail:GetInstanceMetricData", + "lightsail:GetInstancePortStates", + "lightsail:GetInstances", + "lightsail:GetInstanceSnapshot", + "lightsail:GetInstanceSnapshots", + "lightsail:GetInstanceState", + "lightsail:GetKeyPair", + "lightsail:GetKeyPairs", + "lightsail:GetLoadBalancer", + "lightsail:GetLoadBalancerMetricData", + "lightsail:GetLoadBalancers", + "lightsail:GetLoadBalancerTlsCertificates", + "lightsail:GetOperation", + "lightsail:GetOperations", + "lightsail:GetOperationsForResource", + "lightsail:GetRegions", + "lightsail:GetRelationalDatabase", + "lightsail:GetRelationalDatabaseBlueprints", + "lightsail:GetRelationalDatabaseBundles", + "lightsail:GetRelationalDatabaseEvents", + "lightsail:GetRelationalDatabaseLogEvents", + "lightsail:GetRelationalDatabaseLogStreams", + "lightsail:GetRelationalDatabaseMetricData", + "lightsail:GetRelationalDatabaseParameters", + "lightsail:GetRelationalDatabases", + "lightsail:GetRelationalDatabaseSnapshot", + "lightsail:GetRelationalDatabaseSnapshots", + "lightsail:GetResources", + "lightsail:GetStaticIp", + "lightsail:GetStaticIps", + "lightsail:GetTagKeys", + "lightsail:GetTagValues", "lightsail:Is*", - "lightsail:Download*", + "lightsail:List*", "logs:Describe*", "logs:Get*", "logs:FilterLogEvents", "logs:ListTagsLogGroup", + "logs:StartQuery", "logs:TestMetricFilter", "machinelearning:Describe*", "machinelearning:Get*", + "mgh:Describe*", + "mgh:List*", "mobileanalytics:Get*", + "mobilehub:Describe*", + "mobilehub:Export*", + "mobilehub:Generate*", "mobilehub:Get*", "mobilehub:List*", "mobilehub:Validate*", "mobilehub:Verify*", "mobiletargeting:Get*", + "mq:Describe*", + "mq:List*", "opsworks:Describe*", "opsworks:Get*", "opsworks-cm:Describe*", "organizations:Describe*", "organizations:List*", + "pi:DescribeDimensionKeys", + "pi:GetResourceMetrics", "polly:Describe*", "polly:Get*", "polly:List*", @@ -11594,8 +26541,15 @@ aws_managed_policies_data = """ "rds:List*", "rds:Download*", "redshift:Describe*", + "redshift:GetReservedNodeExchangeOfferings", "redshift:View*", - "redshift:Get*", + "resource-groups:Describe*", + "resource-groups:Get*", + "resource-groups:List*", + "resource-groups:Search*", + "robomaker:BatchDescribe*", + "robomaker:Describe*", + "robomaker:List*", "route53:Get*", "route53:List*", "route53:Test*", @@ -11606,19 +26560,34 @@ aws_managed_policies_data = """ "s3:Get*", "s3:List*", "s3:Head*", + "sagemaker:Describe*", + "sagemaker:List*", "sdb:Get*", "sdb:List*", "sdb:Select*", + "secretsmanager:List*", + "secretsmanager:Describe*", + "secretsmanager:GetResourcePolicy", + "securityhub:Get*", + "securityhub:List*", + "serverlessrepo:List*", + "serverlessrepo:Get*", + "serverlessrepo:SearchApplications", "servicecatalog:List*", 
"servicecatalog:Scan*", "servicecatalog:Search*", "servicecatalog:Describe*", + "servicediscovery:Get*", + "servicediscovery:List*", "ses:Get*", "ses:List*", "ses:Describe*", - "ses:Verify*", "shield:Describe*", + "shield:Get*", "shield:List*", + "snowball:Get*", + "snowball:Describe*", + "snowball:List*", "sns:Get*", "sns:List*", "sns:Check*", @@ -11639,6 +26608,11 @@ aws_managed_policies_data = """ "swf:Get*", "swf:List*", "tag:Get*", + "transfer:Describe*", + "transfer:List*", + "transfer:TestIdentityProvider", + "transcribe:Get*", + "transcribe:List*", "trustedadvisor:Describe*", "waf:Get*", "waf:List*", @@ -11647,6 +26621,8 @@ aws_managed_policies_data = """ "workdocs:Describe*", "workdocs:Get*", "workdocs:CheckAlias", + "worklink:Describe*", + "worklink:List*", "workmail:Describe*", "workmail:Get*", "workmail:List*", @@ -11664,16 +26640,17 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAILL3HVNFSB6DCOWYQ", "PolicyName": "ReadOnlyAccess", - "UpdateDate": "2017-07-20T17:43:06+00:00", - "VersionId": "v29" + "UpdateDate": "2019-06-03T20:01:28+00:00", + "VersionId": "v50" }, "ResourceGroupsandTagEditorFullAccess": { "Arn": "arn:aws:iam::aws:policy/ResourceGroupsandTagEditorFullAccess", "AttachmentCount": 0, "CreateDate": "2015-02-06T18:39:53+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -11681,8 +26658,13 @@ aws_managed_policies_data = """ "tag:getResources", "tag:getTagKeys", "tag:getTagValues", - "tag:addResourceTags", - "tag:removeResourceTags" + "tag:TagResources", + "tag:UntagResources", + "tag:AddResourceTags", + "tag:RemoveResourceTags", + "resource-groups:*", + "cloudformation:DescribeStacks", + "cloudformation:ListStackResources" ], "Effect": "Allow", "Resource": "*" @@ -11693,23 +26675,29 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJNOS54ZFXN4T2Y34A", "PolicyName": "ResourceGroupsandTagEditorFullAccess", - "UpdateDate": "2015-02-06T18:39:53+00:00", - "VersionId": "v1" + "UpdateDate": "2019-03-07T21:54:03+00:00", + "VersionId": "v4" }, "ResourceGroupsandTagEditorReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/ResourceGroupsandTagEditorReadOnlyAccess", "AttachmentCount": 0, "CreateDate": "2015-02-06T18:39:54+00:00", - "DefaultVersionId": "v1", + "DefaultVersionId": "v2", "Document": { "Statement": [ { "Action": [ "tag:getResources", "tag:getTagKeys", - "tag:getTagValues" + "tag:getTagValues", + "resource-groups:Get*", + "resource-groups:List*", + "resource-groups:Search*", + "cloudformation:DescribeStacks", + "cloudformation:ListStackResources" ], "Effect": "Allow", "Resource": "*" @@ -11720,35 +26708,119 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJHXQTPI5I5JKAIU74", "PolicyName": "ResourceGroupsandTagEditorReadOnlyAccess", - "UpdateDate": "2015-02-06T18:39:54+00:00", - "VersionId": "v1" + "UpdateDate": "2019-03-07T19:43:17+00:00", + "VersionId": "v2" }, - "SecurityAudit": { - "Arn": "arn:aws:iam::aws:policy/SecurityAudit", + "SecretsManagerReadWrite": { + "Arn": "arn:aws:iam::aws:policy/SecretsManagerReadWrite", "AttachmentCount": 0, - "CreateDate": "2017-07-12T20:16:44+00:00", - "DefaultVersionId": "v12", + "CreateDate": "2018-04-04T18:05:29+00:00", + "DefaultVersionId": "v2", "Document": { "Statement": [ { 
"Action": [ - "acm:ListCertificates", - "acm:DescribeCertificate", - "cloudformation:getStackPolicy", - "logs:describeLogGroups", - "logs:describeMetricFilters", + "secretsmanager:*", + "cloudformation:CreateChangeSet", + "cloudformation:DescribeChangeSet", + "cloudformation:DescribeStackResource", + "cloudformation:DescribeStacks", + "cloudformation:ExecuteChangeSet", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVpcs", + "kms:DescribeKey", + "kms:ListAliases", + "kms:ListKeys", + "lambda:ListFunctions", + "rds:DescribeDBClusters", + "rds:DescribeDBInstances", + "tag:GetResources" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "lambda:AddPermission", + "lambda:CreateFunction", + "lambda:GetFunction", + "lambda:InvokeFunction", + "lambda:UpdateFunctionConfiguration" + ], + "Effect": "Allow", + "Resource": "arn:aws:lambda:*:*:function:SecretsManager*" + }, + { + "Action": [ + "serverlessrepo:CreateCloudFormationChangeSet" + ], + "Effect": "Allow", + "Resource": "arn:aws:serverlessrepo:*:*:applications/SecretsManager*" + }, + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::awsserverlessrepo-changesets*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAI3VG7CI5BIQZQ6G2E", + "PolicyName": "SecretsManagerReadWrite", + "UpdateDate": "2018-05-03T20:02:35+00:00", + "VersionId": "v2" + }, + "SecurityAudit": { + "Arn": "arn:aws:iam::aws:policy/SecurityAudit", + "AttachmentCount": 0, + "CreateDate": "2015-02-06T18:41:01+00:00", + "DefaultVersionId": "v27", + "Document": { + "Statement": [ + { + "Action": [ + "acm:Describe*", + "acm:List*", + "application-autoscaling:Describe*", + "appmesh:Describe*", + "appmesh:List*", + "appsync:List*", + "athena:List*", "autoscaling:Describe*", + "batch:DescribeComputeEnvironments", + "batch:DescribeJobDefinitions", + "chime:List*", + "cloud9:Describe*", + "cloud9:ListEnvironments", + "clouddirectory:ListDirectories", "cloudformation:DescribeStack*", "cloudformation:GetTemplate", "cloudformation:ListStack*", + "cloudformation:GetStackPolicy", "cloudfront:Get*", "cloudfront:List*", + "cloudhsm:ListHapgs", + "cloudhsm:ListHsms", + "cloudhsm:ListLunaClients", + "cloudsearch:DescribeDomains", + "cloudsearch:DescribeServiceAccessPolicies", "cloudtrail:DescribeTrails", + "cloudtrail:GetEventSelectors", "cloudtrail:GetTrailStatus", "cloudtrail:ListTags", + "cloudtrail:LookupEvents", "cloudwatch:Describe*", + "codebuild:ListProjects", "codecommit:BatchGetRepositories", "codecommit:GetBranch", "codecommit:GetObjectIdentifier", @@ -11757,9 +26829,21 @@ aws_managed_policies_data = """ "codedeploy:Batch*", "codedeploy:Get*", "codedeploy:List*", + "codepipeline:ListPipelines", + "codestar:Describe*", + "codestar:List*", + "cognito-identity:ListIdentityPools", + "cognito-idp:ListUserPools", + "cognito-sync:Describe*", + "cognito-sync:List*", + "comprehend:Describe*", + "comprehend:List*", + "config:BatchGetAggregateResourceConfig", + "config:BatchGetResourceConfig", "config:Deliver*", "config:Describe*", "config:Get*", + "config:List*", "datapipeline:DescribeObjects", "datapipeline:DescribePipelines", "datapipeline:EvaluateExpression", @@ -11767,83 +26851,204 @@ aws_managed_policies_data = """ "datapipeline:ListPipelines", "datapipeline:QueryObjects", "datapipeline:ValidatePipelineDefinition", + "datasync:Describe*", + "datasync:List*", + "dax:Describe*", + "dax:ListTags", 
"directconnect:Describe*", + "dms:Describe*", + "dms:ListTagsForResource", + "ds:DescribeDirectories", + "dynamodb:DescribeContinuousBackups", + "dynamodb:DescribeGlobalTable", + "dynamodb:DescribeTable", + "dynamodb:DescribeTimeToLive", + "dynamodb:ListBackups", + "dynamodb:ListGlobalTables", + "dynamodb:ListStreams", "dynamodb:ListTables", "ec2:Describe*", + "ecr:DescribeRepositories", + "ecr:GetRepositoryPolicy", "ecs:Describe*", "ecs:List*", + "eks:DescribeCluster", + "eks:ListClusters", "elasticache:Describe*", "elasticbeanstalk:Describe*", + "elasticfilesystem:DescribeFileSystems", + "elasticfilesystem:DescribeMountTargetSecurityGroups", + "elasticfilesystem:DescribeMountTargets", "elasticloadbalancing:Describe*", - "elasticmapreduce:DescribeJobFlows", + "elasticmapreduce:Describe*", "elasticmapreduce:ListClusters", "elasticmapreduce:ListInstances", - "es:ListDomainNames", "es:Describe*", + "es:ListDomainNames", + "events:Describe*", + "events:List*", "firehose:Describe*", "firehose:List*", + "fms:ListComplianceStatus", + "fms:ListPolicies", + "fsx:Describe*", + "fsx:List*", + "gamelift:ListBuilds", + "gamelift:ListFleets", "glacier:DescribeVault", "glacier:GetVaultAccessPolicy", "glacier:ListVaults", + "globalaccelerator:Describe*", + "globalaccelerator:List*", + "greengrass:List*", + "guardduty:Get*", + "guardduty:List*", "iam:GenerateCredentialReport", + "iam:GenerateServiceLastAccessedDetails", "iam:Get*", "iam:List*", + "iam:SimulateCustomPolicy", + "iam:SimulatePrincipalPolicy", + "inspector:Describe*", + "inspector:Get*", + "inspector:List*", + "inspector:Preview*", + "iot:Describe*", + "iot:GetPolicy", + "iot:GetPolicyVersion", + "iot:List*", + "kinesis:DescribeStream", + "kinesis:ListStreams", + "kinesis:ListTagsForStream", + "kinesisanalytics:ListApplications", "kms:Describe*", "kms:Get*", "kms:List*", + "lambda:GetAccountSettings", + "lambda:GetFunctionConfiguration", + "lambda:GetLayerVersionPolicy", "lambda:GetPolicy", - "lambda:ListFunctions", + "lambda:List*", + "license-manager:List*", + "lightsail:GetInstances", + "logs:Describe*", + "logs:ListTagsLogGroup", + "machinelearning:DescribeMLModels", + "mediaconnect:Describe*", + "mediaconnect:List*", + "mediastore:GetContainerPolicy", + "mediastore:ListContainers", + "opsworks:DescribeStacks", + "opsworks-cm:DescribeServers", + "organizations:List*", + "organizations:Describe*", + "quicksight:Describe*", + "quicksight:List*", + "ram:List*", "rds:Describe*", "rds:DownloadDBLogFilePortion", "rds:ListTagsForResource", "redshift:Describe*", - "route53:GetChange", - "route53:GetCheckerIpRanges", - "route53:GetGeoLocation", - "route53:GetHealthCheck", - "route53:GetHealthCheckCount", - "route53:GetHealthCheckLastFailureReason", - "route53:GetHostedZone", - "route53:GetHostedZoneCount", - "route53:GetReusableDelegationSet", - "route53:ListGeoLocations", - "route53:ListHealthChecks", - "route53:ListHostedZones", - "route53:ListHostedZonesByName", - "route53:ListResourceRecordSets", - "route53:ListReusableDelegationSets", - "route53:ListTagsForResource", - "route53:ListTagsForResources", + "rekognition:Describe*", + "rekognition:List*", + "robomaker:Describe*", + "robomaker:List*", + "route53:Get*", + "route53:List*", "route53domains:GetDomainDetail", "route53domains:GetOperationDetail", "route53domains:ListDomains", "route53domains:ListOperations", "route53domains:ListTagsForDomain", - "s3:GetBucket*", + "route53resolver:List*", "s3:GetAccelerateConfiguration", + "s3:GetAccountPublicAccessBlock", "s3:GetAnalyticsConfiguration", 
+ "s3:GetBucket*", + "s3:GetEncryptionConfiguration", "s3:GetInventoryConfiguration", - "s3:GetMetricsConfiguration", - "s3:GetReplicationConfiguration", "s3:GetLifecycleConfiguration", + "s3:GetMetricsConfiguration", "s3:GetObjectAcl", "s3:GetObjectVersionAcl", + "s3:GetPublicAccessBlock", + "s3:GetReplicationConfiguration", "s3:ListAllMyBuckets", + "sagemaker:Describe*", + "sagemaker:List*", "sdb:DomainMetadata", "sdb:ListDomains", + "secretsmanager:GetResourcePolicy", + "secretsmanager:ListSecrets", + "secretsmanager:ListSecretVersionIds", + "securityhub:Get*", + "securityhub:List*", + "serverlessrepo:GetApplicationPolicy", + "serverlessrepo:List*", "ses:GetIdentityDkimAttributes", "ses:GetIdentityVerificationAttributes", "ses:ListIdentities", + "ses:ListVerifiedEmailAddresses", + "shield:Describe*", + "shield:List*", + "snowball:ListClusters", + "snowball:ListJobs", "sns:GetTopicAttributes", "sns:ListSubscriptionsByTopic", "sns:ListTopics", "sqs:GetQueueAttributes", + "sqs:ListDeadLetterSourceQueues", "sqs:ListQueues", + "sqs:ListQueueTags", + "ssm:Describe*", + "ssm:ListDocuments", + "sso:DescribePermissionsPolicies", + "sso:List*", + "states:ListStateMachines", + "storagegateway:DescribeBandwidthRateLimit", + "storagegateway:DescribeCache", + "storagegateway:DescribeCachediSCSIVolumes", + "storagegateway:DescribeGatewayInformation", + "storagegateway:DescribeMaintenanceStartTime", + "storagegateway:DescribeNFSFileShares", + "storagegateway:DescribeSnapshotSchedule", + "storagegateway:DescribeStorediSCSIVolumes", + "storagegateway:DescribeTapeArchives", + "storagegateway:DescribeTapeRecoveryPoints", + "storagegateway:DescribeTapes", + "storagegateway:DescribeUploadBuffer", + "storagegateway:DescribeVTLDevices", + "storagegateway:DescribeWorkingStorage", + "storagegateway:List*", "tag:GetResources", - "tag:GetTagKeys" + "tag:GetTagKeys", + "transfer:Describe*", + "transfer:List*", + "translate:List*", + "trustedadvisor:Describe*", + "waf:ListWebACLs", + "waf-regional:ListWebACLs", + "workspaces:Describe*" ], "Effect": "Allow", "Resource": "*" + }, + { + "Action": [ + "apigateway:HEAD", + "apigateway:GET", + "apigateway:OPTIONS" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:apigateway:*::/restapis", + "arn:aws:apigateway:*::/restapis/*/authorizers", + "arn:aws:apigateway:*::/restapis/*/authorizers/*", + "arn:aws:apigateway:*::/restapis/*/resources", + "arn:aws:apigateway:*::/restapis/*/resources/*", + "arn:aws:apigateway:*::/restapis/*/resources/*/methods/*", + "arn:aws:apigateway:*::/vpclinks" + ] } ], "Version": "2012-10-17" @@ -11851,10 +27056,11 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIX2T3QCXHR2OGGCTO", "PolicyName": "SecurityAudit", - "UpdateDate": "2017-07-12T20:16:44+00:00", - "VersionId": "v12" + "UpdateDate": "2019-04-29T18:33:52+00:00", + "VersionId": "v27" }, "ServerMigrationConnector": { "Arn": "arn:aws:iam::aws:policy/ServerMigrationConnector", @@ -11923,18 +27129,135 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJKZRWXIPK5HSG3QDQ", "PolicyName": "ServerMigrationConnector", "UpdateDate": "2016-10-24T21:45:56+00:00", "VersionId": "v1" }, + "ServerMigrationServiceLaunchRole": { + "Arn": "arn:aws:iam::aws:policy/service-role/ServerMigrationServiceLaunchRole", + "AttachmentCount": 0, + "CreateDate": "2018-11-26T19:53:06+00:00", + "DefaultVersionId": "v1", + 
"Document": { + "Statement": [ + { + "Action": [ + "ec2:ModifyInstanceAttribute", + "ec2:StopInstances", + "ec2:StartInstances", + "ec2:TerminateInstances" + ], + "Condition": { + "ForAllValues:StringLike": { + "ec2:ResourceTag/aws:cloudformation:stack-id": "arn:aws:cloudformation:*:*:stack/sms-app-*/*" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateTags", + "Effect": "Allow", + "Resource": "arn:aws:ec2:*:*:instance/*" + }, + { + "Action": [ + "ec2:RunInstances", + "ec2:Describe*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIIIAAMVUCBR2OLXZO", + "PolicyName": "ServerMigrationServiceLaunchRole", + "UpdateDate": "2018-11-26T19:53:06+00:00", + "VersionId": "v1" + }, "ServerMigrationServiceRole": { "Arn": "arn:aws:iam::aws:policy/service-role/ServerMigrationServiceRole", "AttachmentCount": 0, - "CreateDate": "2017-06-16T18:02:04+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2016-10-24T21:19:00+00:00", + "DefaultVersionId": "v3", "Document": { "Statement": [ + { + "Action": [ + "cloudformation:CreateChangeSet", + "cloudformation:CreateStack", + "cloudformation:DeleteStack", + "cloudformation:ExecuteChangeSet" + ], + "Condition": { + "ForAllValues:StringLikeIfExists": { + "cloudformation:ResourceTypes": [ + "AWS::EC2::*" + ] + } + }, + "Effect": "Allow", + "Resource": "arn:aws:cloudformation:*:*:stack/sms-app-*/*" + }, + { + "Action": [ + "cloudformation:DeleteChangeSet", + "cloudformation:DescribeChangeSet", + "cloudformation:DescribeStackEvents", + "cloudformation:DescribeStackResources", + "cloudformation:GetTemplate" + ], + "Effect": "Allow", + "Resource": "arn:aws:cloudformation:*:*:stack/sms-app-*/*" + }, + { + "Action": [ + "cloudformation:DescribeStacks", + "cloudformation:ValidateTemplate", + "cloudformation:DescribeStackResource", + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "s3:CreateBucket", + "s3:DeleteBucket", + "s3:DeleteObject", + "s3:GetBucketAcl", + "s3:GetBucketLocation", + "s3:GetObject", + "s3:ListBucket", + "s3:PutObject", + "s3:PutObjectAcl", + "s3:PutLifecycleConfiguration", + "s3:ListAllMyBuckets" + ], + "Effect": "Allow", + "Resource": "arn:aws:s3:::sms-app-*" + }, + { + "Action": [ + "sms:CreateReplicationJob", + "sms:DeleteReplicationJob", + "sms:GetReplicationJobs", + "sms:GetReplicationRuns", + "sms:GetServers", + "sms:ImportServerCatalog", + "sms:StartOnDemandReplicationRun", + "sms:UpdateReplicationJob" + ], + "Effect": "Allow", + "Resource": "*" + }, { "Action": [ "ec2:ModifySnapshotAttribute", @@ -11948,50 +27271,19 @@ aws_managed_policies_data = """ ], "Effect": "Allow", "Resource": "*" - } - ], - "Version": "2012-10-17" - }, - "IsAttachable": true, - "IsDefaultVersion": true, - "Path": "/service-role/", - "PolicyId": "ANPAJMBH3M6BO63XFW2D4", - "PolicyName": "ServerMigrationServiceRole", - "UpdateDate": "2017-06-16T18:02:04+00:00", - "VersionId": "v2" - }, - "ServiceCatalogAdminFullAccess": { - "Arn": "arn:aws:iam::aws:policy/ServiceCatalogAdminFullAccess", - "AttachmentCount": 0, - "CreateDate": "2016-11-11T18:40:24+00:00", - "DefaultVersionId": "v2", - "Document": { - "Statement": [ + }, { - "Action": [ - "catalog-admin:*", - "catalog-user:*", - "cloudformation:CreateStack", - "cloudformation:CreateUploadBucket", - "cloudformation:DeleteStack", - "cloudformation:DescribeStackEvents", - 
"cloudformation:DescribeStacks", - "cloudformation:GetTemplateSummary", - "cloudformation:SetStackPolicy", - "cloudformation:ValidateTemplate", - "cloudformation:UpdateStack", - "iam:GetGroup", - "iam:GetRole", - "iam:GetUser", - "iam:ListGroups", - "iam:ListRoles", - "iam:ListUsers", - "iam:PassRole", - "s3:CreateBucket", - "s3:GetObject", - "s3:PutObject", - "servicecatalog:*" - ], + "Action": "iam:GetRole", + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "iam:PassRole", + "Condition": { + "StringLike": { + "iam:AssociatedResourceArn": "arn:aws:cloudformation:*:*:stack/sms-app-*/*" + } + }, "Effect": "Allow", "Resource": "*" } @@ -12000,16 +27292,17 @@ aws_managed_policies_data = """ }, "IsAttachable": true, "IsDefaultVersion": true, - "Path": "/", - "PolicyId": "ANPAIKTX42IAS75B7B7BY", - "PolicyName": "ServiceCatalogAdminFullAccess", - "UpdateDate": "2016-11-11T18:40:24+00:00", - "VersionId": "v2" + "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJMBH3M6BO63XFW2D4", + "PolicyName": "ServerMigrationServiceRole", + "UpdateDate": "2018-11-26T19:33:29+00:00", + "VersionId": "v3" }, "ServiceCatalogAdminReadOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/ServiceCatalogAdminReadOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-08T18:57:36+00:00", + "CreateDate": "2015-09-29T18:40:35+00:00", "DefaultVersionId": "v5", "Document": { "Statement": [ @@ -12080,6 +27373,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ7XOUSS75M4LIPKO4", "PolicyName": "ServiceCatalogAdminReadOnlyAccess", "UpdateDate": "2017-08-08T18:57:36+00:00", @@ -12088,7 +27382,7 @@ aws_managed_policies_data = """ "ServiceCatalogEndUserAccess": { "Arn": "arn:aws:iam::aws:policy/ServiceCatalogEndUserAccess", "AttachmentCount": 0, - "CreateDate": "2017-08-08T18:58:57+00:00", + "CreateDate": "2015-09-29T18:41:33+00:00", "DefaultVersionId": "v4", "Document": { "Statement": [ @@ -12126,68 +27420,12 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJ56OMCO72RI4J5FSA", "PolicyName": "ServiceCatalogEndUserAccess", "UpdateDate": "2017-08-08T18:58:57+00:00", "VersionId": "v4" }, - "ServiceCatalogEndUserFullAccess": { - "Arn": "arn:aws:iam::aws:policy/ServiceCatalogEndUserFullAccess", - "AttachmentCount": 0, - "CreateDate": "2017-08-08T18:58:54+00:00", - "DefaultVersionId": "v4", - "Document": { - "Statement": [ - { - "Action": [ - "catalog-user:*", - "cloudformation:CreateStack", - "cloudformation:DeleteStack", - "cloudformation:DescribeStackEvents", - "cloudformation:DescribeStacks", - "cloudformation:GetTemplateSummary", - "cloudformation:SetStackPolicy", - "cloudformation:ValidateTemplate", - "cloudformation:UpdateStack", - "servicecatalog:DescribeProduct", - "servicecatalog:DescribeProductView", - "servicecatalog:DescribeProvisioningParameters", - "servicecatalog:ListLaunchPaths", - "servicecatalog:ProvisionProduct", - "servicecatalog:SearchProducts", - "s3:GetObject" - ], - "Effect": "Allow", - "Resource": "*" - }, - { - "Action": [ - "servicecatalog:DescribeProvisionedProduct", - "servicecatalog:DescribeRecord", - "servicecatalog:ListRecordHistory", - "servicecatalog:ScanProvisionedProducts", - "servicecatalog:TerminateProvisionedProduct", - "servicecatalog:UpdateProvisionedProduct" - ], - "Condition": { - "StringEquals": { - "servicecatalog:userLevel": "self" - } - }, - 
"Effect": "Allow", - "Resource": "*" - } - ], - "Version": "2012-10-17" - }, - "IsAttachable": true, - "IsDefaultVersion": true, - "Path": "/", - "PolicyId": "ANPAJIW7AFFOONVKW75KU", - "PolicyName": "ServiceCatalogEndUserFullAccess", - "UpdateDate": "2017-08-08T18:58:54+00:00", - "VersionId": "v4" - }, "SimpleWorkflowFullAccess": { "Arn": "arn:aws:iam::aws:policy/SimpleWorkflowFullAccess", "AttachmentCount": 0, @@ -12208,6 +27446,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAIFE3AV6VE7EANYBVM", "PolicyName": "SimpleWorkflowFullAccess", "UpdateDate": "2015-02-06T18:41:04+00:00", @@ -12216,7 +27455,7 @@ aws_managed_policies_data = """ "SupportUser": { "Arn": "arn:aws:iam::aws:policy/job-function/SupportUser", "AttachmentCount": 0, - "CreateDate": "2017-05-17T23:11:51+00:00", + "CreateDate": "2016-11-10T17:21:53+00:00", "DefaultVersionId": "v2", "Document": { "Statement": [ @@ -12434,6 +27673,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/job-function/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAI3V4GSSN5SJY3P2RO", "PolicyName": "SupportUser", "UpdateDate": "2017-05-17T23:11:51+00:00", @@ -12442,8 +27682,8 @@ aws_managed_policies_data = """ "SystemAdministrator": { "Arn": "arn:aws:iam::aws:policy/job-function/SystemAdministrator", "AttachmentCount": 0, - "CreateDate": "2017-03-24T17:45:43+00:00", - "DefaultVersionId": "v2", + "CreateDate": "2016-11-10T17:23:56+00:00", + "DefaultVersionId": "v4", "Document": { "Statement": [ { @@ -12554,6 +27794,8 @@ aws_managed_policies_data = """ "ec2:RunScheduledInstances", "ec2:UnassignPrivateIpAddresses", "ec2:UnmonitorInstances", + "ec2:UpdateSecurityGroupRuleDescriptionsEgress", + "ec2:UpdateSecurityGroupRuleDescriptionsIngress", "elasticloadbalancing:*", "events:*", "iam:GetAccount*", @@ -12688,7 +27930,8 @@ aws_managed_policies_data = """ "arn:aws:iam::*:role/rds-monitoring-role", "arn:aws:iam::*:role/ec2-sysadmin-*", "arn:aws:iam::*:role/ecr-sysadmin-*", - "arn:aws:iam::*:role/lamdba-sysadmin-*" + "arn:aws:iam::*:role/lamdba-sysadmin-*", + "arn:aws:iam::*:role/lambda-sysadmin-*" ] } ], @@ -12697,11 +27940,120 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/job-function/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAITJPEZXCYCBXANDSW", "PolicyName": "SystemAdministrator", - "UpdateDate": "2017-03-24T17:45:43+00:00", + "UpdateDate": "2018-10-08T21:33:45+00:00", + "VersionId": "v4" + }, + "TagPoliciesServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/TagPoliciesServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-10-26T20:02:52+00:00", + "DefaultVersionId": "v2", + "Document": { + "Statement": [ + { + "Action": [ + "organizations:ListAccounts", + "organizations:ListAccountsForParent", + "organizations:ListChildren", + "organizations:DescribeAccount", + "organizations:DescribeOrganization", + "organizations:ListRoots", + "organizations:ListParents" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "organizations:DisableAWSServiceAccess" + ], + "Condition": { + "ForAllValues:StringLike": { + "organizations:ServicePrincipal": [ + "tagpolicies.tag.amazonaws.com" + ] + } + }, + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + 
"PolicyId": "ANPAJGGCZXCABSYJA7UBI", + "PolicyName": "TagPoliciesServiceRolePolicy", + "UpdateDate": "2019-05-10T21:38:33+00:00", "VersionId": "v2" }, + "TranslateFullAccess": { + "Arn": "arn:aws:iam::aws:policy/TranslateFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-27T23:36:20+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "translate:*", + "comprehend:DetectDominantLanguage", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIAPOAEI2VFQYUK5RY", + "PolicyName": "TranslateFullAccess", + "UpdateDate": "2018-11-27T23:36:20+00:00", + "VersionId": "v1" + }, + "TranslateReadOnly": { + "Arn": "arn:aws:iam::aws:policy/TranslateReadOnly", + "AttachmentCount": 0, + "CreateDate": "2017-11-29T18:22:00+00:00", + "DefaultVersionId": "v4", + "Document": { + "Statement": [ + { + "Action": [ + "translate:TranslateText", + "translate:GetTerminology", + "translate:ListTerminologies", + "comprehend:DetectDominantLanguage", + "cloudwatch:GetMetricStatistics", + "cloudwatch:ListMetrics" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJYAMZMTQNWUDJKY2E", + "PolicyName": "TranslateReadOnly", + "UpdateDate": "2018-11-27T23:29:08+00:00", + "VersionId": "v4" + }, "VMImportExportRoleForAWSConnector": { "Arn": "arn:aws:iam::aws:policy/service-role/VMImportExportRoleForAWSConnector", "AttachmentCount": 0, @@ -12736,6 +28088,7 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/service-role/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAJFLQOOJ6F5XNX4LAW", "PolicyName": "VMImportExportRoleForAWSConnector", "UpdateDate": "2015-09-03T20:48:59+00:00", @@ -12744,8 +28097,8 @@ aws_managed_policies_data = """ "ViewOnlyAccess": { "Arn": "arn:aws:iam::aws:policy/job-function/ViewOnlyAccess", "AttachmentCount": 0, - "CreateDate": "2017-06-26T22:35:31+00:00", - "DefaultVersionId": "v3", + "CreateDate": "2016-11-10T17:20:15+00:00", + "DefaultVersionId": "v7", "Document": { "Statement": [ { @@ -12771,7 +28124,7 @@ aws_managed_policies_data = """ "cloudtrail:DescribeTrails", "cloudtrail:LookupEvents", "cloudwatch:List*", - "cloudwatch:GetMetricData", + "cloudwatch:Get*", "codebuild:ListBuilds*", "codebuild:ListProjects", "codecommit:List*", @@ -12790,12 +28143,35 @@ aws_managed_policies_data = """ "datapipeline:ListPipelines", "datapipeline:DescribePipelines", "datapipeline:GetAccountLimits", + "dax:DescribeClusters", + "dax:DescribeDefaultParameters", + "dax:DescribeEvents", + "dax:DescribeParameterGroups", + "dax:DescribeParameters", + "dax:DescribeSubnetGroups", + "dax:DescribeTable", + "dax:ListTables", + "dax:ListTags", "devicefarm:List*", "directconnect:Describe*", "discovery:List*", "dms:List*", "ds:DescribeDirectories", + "dynamodb:DescribeBackup", + "dynamodb:DescribeContinuousBackups", + "dynamodb:DescribeGlobalTable", + "dynamodb:DescribeGlobalTableSettings", + "dynamodb:DescribeLimits", + "dynamodb:DescribeReservedCapacity", + "dynamodb:DescribeReservedCapacityOfferings", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:DescribeTimeToLive", + "dynamodb:ListBackups", + "dynamodb:ListGlobalTables", + 
"dynamodb:ListStreams", "dynamodb:ListTables", + "dynamodb:ListTagsOfResource", "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DescribeAvailabilityZones", @@ -12826,12 +28202,14 @@ aws_managed_policies_data = """ "ec2:DescribeSnapshot*", "ec2:DescribeSpot*", "ec2:DescribeSubnets", + "ec2:DescribeTags", "ec2:DescribeVolume*", "ec2:DescribeVpc*", "ec2:DescribeVpnGateways", "ecr:DescribeRepositories", "ecr:ListImages", "ecs:List*", + "ecs:Describe*", "elasticache:Describe*", "elasticbeanstalk:DescribeApplicationVersions", "elasticbeanstalk:DescribeApplications", @@ -12854,6 +28232,7 @@ aws_managed_policies_data = """ "firehose:DescribeDeliveryStream", "gamelift:List*", "glacier:List*", + "greengrass:List*", "iam:List*", "iam:GetAccountSummary", "iam:GetLoginProfile", @@ -12904,6 +28283,8 @@ aws_managed_policies_data = """ "route53domains:List*", "s3:ListAllMyBuckets", "s3:ListBucket", + "sagemaker:Describe*", + "sagemaker:List*", "sdb:List*", "servicecatalog:List*", "ses:List*", @@ -12936,9 +28317,159 @@ aws_managed_policies_data = """ "IsAttachable": true, "IsDefaultVersion": true, "Path": "/job-function/", + "PermissionsBoundaryUsageCount": 0, "PolicyId": "ANPAID22R6XPJATWOFDK6", "PolicyName": "ViewOnlyAccess", - "UpdateDate": "2017-06-26T22:35:31+00:00", - "VersionId": "v3" + "UpdateDate": "2018-10-15T18:34:54+00:00", + "VersionId": "v7" + }, + "WAFLoggingServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/WAFLoggingServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-08-24T21:05:47+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "firehose:PutRecord", + "firehose:PutRecordBatch" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:firehose:*:*:deliverystream/aws-waf-logs-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJZ7N545GUNUHNTYOM", + "PolicyName": "WAFLoggingServiceRolePolicy", + "UpdateDate": "2018-08-24T21:05:47+00:00", + "VersionId": "v1" + }, + "WAFRegionalLoggingServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/aws-service-role/WAFRegionalLoggingServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2018-08-24T18:40:55+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "firehose:PutRecord", + "firehose:PutRecordBatch" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:firehose:*:*:deliverystream/aws-waf-logs-*" + ] + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/aws-service-role/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJE43HAZMEH4CI6SU2", + "PolicyName": "WAFRegionalLoggingServiceRolePolicy", + "UpdateDate": "2018-08-24T18:40:55+00:00", + "VersionId": "v1" + }, + "WellArchitectedConsoleFullAccess": { + "Arn": "arn:aws:iam::aws:policy/WellArchitectedConsoleFullAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-29T18:19:23+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "wellarchitected:*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIH6HSBHM3VSYC5SKA", + "PolicyName": "WellArchitectedConsoleFullAccess", + "UpdateDate": "2018-11-29T18:19:23+00:00", + "VersionId": "v1" + }, + "WellArchitectedConsoleReadOnlyAccess": { + 
"Arn": "arn:aws:iam::aws:policy/WellArchitectedConsoleReadOnlyAccess", + "AttachmentCount": 0, + "CreateDate": "2018-11-29T18:21:08+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "wellarchitected:Get*", + "wellarchitected:List*" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAIUTK35NDTYF6T2GFY", + "PolicyName": "WellArchitectedConsoleReadOnlyAccess", + "UpdateDate": "2018-11-29T18:21:08+00:00", + "VersionId": "v1" + }, + "WorkLinkServiceRolePolicy": { + "Arn": "arn:aws:iam::aws:policy/WorkLinkServiceRolePolicy", + "AttachmentCount": 0, + "CreateDate": "2019-01-23T19:03:45+00:00", + "DefaultVersionId": "v1", + "Document": { + "Statement": [ + { + "Action": [ + "ec2:CreateNetworkInterface", + "ec2:DeleteNetworkInterfacePermission", + "ec2:CreateNetworkInterfacePermission", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:DeleteNetworkInterface" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "kinesis:PutRecord", + "kinesis:PutRecords" + ], + "Effect": "Allow", + "Resource": "arn:aws:kinesis:*:*:stream/AmazonWorkLink-*" + } + ], + "Version": "2012-10-17" + }, + "IsAttachable": true, + "IsDefaultVersion": true, + "Path": "/", + "PermissionsBoundaryUsageCount": 0, + "PolicyId": "ANPAJ6JTE3DI5JOULLNLS", + "PolicyName": "WorkLinkServiceRolePolicy", + "UpdateDate": "2019-01-23T19:03:45+00:00", + "VersionId": "v1" } }""" diff --git a/moto/iam/exceptions.py b/moto/iam/exceptions.py index 84f15f51f..ac08e0d88 100644 --- a/moto/iam/exceptions.py +++ b/moto/iam/exceptions.py @@ -24,3 +24,72 @@ class IAMReportNotPresentException(RESTError): def __init__(self, message): super(IAMReportNotPresentException, self).__init__( "ReportNotPresent", message) + + +class IAMLimitExceededException(RESTError): + code = 400 + + def __init__(self, message): + super(IAMLimitExceededException, self).__init__( + "LimitExceeded", message) + + +class MalformedCertificate(RESTError): + code = 400 + + def __init__(self, cert): + super(MalformedCertificate, self).__init__( + 'MalformedCertificate', 'Certificate {cert} is malformed'.format(cert=cert)) + + +class MalformedPolicyDocument(RESTError): + code = 400 + + def __init__(self, message=""): + super(MalformedPolicyDocument, self).__init__( + 'MalformedPolicyDocument', message) + + +class DuplicateTags(RESTError): + code = 400 + + def __init__(self): + super(DuplicateTags, self).__init__( + 'InvalidInput', 'Duplicate tag keys found. 
Please note that Tag keys are case insensitive.') + + +class TagKeyTooBig(RESTError): + code = 400 + + def __init__(self, tag, param='tags.X.member.key'): + super(TagKeyTooBig, self).__init__( + 'ValidationError', "1 validation error detected: Value '{}' at '{}' failed to satisfy " + "constraint: Member must have length less than or equal to 128.".format(tag, param)) + + +class TagValueTooBig(RESTError): + code = 400 + + def __init__(self, tag): + super(TagValueTooBig, self).__init__( + 'ValidationError', "1 validation error detected: Value '{}' at 'tags.X.member.value' failed to satisfy " + "constraint: Member must have length less than or equal to 256.".format(tag)) + + +class InvalidTagCharacters(RESTError): + code = 400 + + def __init__(self, tag, param='tags.X.member.key'): + message = "1 validation error detected: Value '{}' at '{}' failed to satisfy ".format(tag, param) + message += "constraint: Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+" + + super(InvalidTagCharacters, self).__init__('ValidationError', message) + + +class TooManyTags(RESTError): + code = 400 + + def __init__(self, tags, param='tags'): + super(TooManyTags, self).__init__( + 'ValidationError', "1 validation error detected: Value '{}' at '{}' failed to satisfy " + "constraint: Member must have length less than or equal to 50.".format(tags, param)) diff --git a/moto/iam/models.py b/moto/iam/models.py index 22bdfdb4b..f92568df4 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -1,14 +1,21 @@ from __future__ import unicode_literals import base64 +import sys from datetime import datetime import json +import re -import pytz +from cryptography import x509 +from cryptography.hazmat.backends import default_backend + +from moto.core.exceptions import RESTError from moto.core import BaseBackend, BaseModel -from moto.core.utils import iso_8601_datetime_without_milliseconds +from moto.core.utils import iso_8601_datetime_without_milliseconds, iso_8601_datetime_with_milliseconds +from moto.iam.policy_validation import IAMPolicyDocumentValidator from .aws_managed_policies import aws_managed_policies_data -from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException +from .exceptions import IAMNotFoundException, IAMConflictException, IAMReportNotPresentException, IAMLimitExceededException, \ + MalformedCertificate, DuplicateTags, TagKeyTooBig, InvalidTagCharacters, TooManyTags, TagValueTooBig from .utils import random_access_key, random_alphanumeric, random_resource_id, random_policy_id ACCOUNT_ID = 123456789012 @@ -21,14 +28,17 @@ class MFADevice(object): serial_number, authentication_code_1, authentication_code_2): - self.enable_date = datetime.now(pytz.utc) + self.enable_date = datetime.utcnow() self.serial_number = serial_number self.authentication_code_1 = authentication_code_1 self.authentication_code_2 = authentication_code_2 + @property + def enabled_iso_8601(self): + return iso_8601_datetime_without_milliseconds(self.enable_date) + class Policy(BaseModel): - is_attachable = False def __init__(self, @@ -36,23 +46,51 @@ class Policy(BaseModel): default_version_id=None, description=None, document=None, - path=None): - self.document = document or {} + path=None, + create_date=None, + update_date=None): self.name = name self.attachment_count = 0 self.description = description or '' self.id = random_policy_id() self.path = path or '/' - self.default_version_id = default_version_id or 'v1' - self.versions = [] - self.create_datetime = 
datetime.now(pytz.utc) - self.update_datetime = datetime.now(pytz.utc) + if default_version_id: + self.default_version_id = default_version_id + self.next_version_num = int(default_version_id.lstrip('v')) + 1 + else: + self.default_version_id = 'v1' + self.next_version_num = 2 + self.versions = [PolicyVersion(self.arn, document, True, self.default_version_id, update_date)] + + self.create_date = create_date if create_date is not None else datetime.utcnow() + self.update_date = update_date if update_date is not None else datetime.utcnow() + + def update_default_version(self, new_default_version_id): + for version in self.versions: + if version.version_id == self.default_version_id: + version.is_default = False + break + self.default_version_id = new_default_version_id + + @property + def created_iso_8601(self): + return iso_8601_datetime_with_milliseconds(self.create_date) + + @property + def updated_iso_8601(self): + return iso_8601_datetime_with_milliseconds(self.update_date) + + +class SAMLProvider(BaseModel): + def __init__(self, name, saml_metadata_document=None): + self.name = name + self.saml_metadata_document = saml_metadata_document @property def arn(self): - return 'arn:aws:iam::aws:policy{0}{1}'.format(self.path, self.name) + return "arn:aws:iam::{0}:saml-provider/{1}".format(ACCOUNT_ID, self.name) class PolicyVersion(object): @@ -60,13 +98,19 @@ class PolicyVersion(object): def __init__(self, policy_arn, document, - is_default=False): + is_default=False, + version_id='v1', + create_date=None): self.policy_arn = policy_arn self.document = document or {} self.is_default = is_default - self.version_id = 'v1' + self.version_id = version_id - self.create_datetime = datetime.now(pytz.utc) + self.create_date = create_date if create_date is not None else datetime.utcnow() + + @property + def created_iso_8601(self): + return iso_8601_datetime_with_milliseconds(self.create_date) class ManagedPolicy(Policy): @@ -76,11 +120,15 @@ class ManagedPolicy(Policy): def attach_to(self, obj): self.attachment_count += 1 - obj.managed_policies[self.name] = self + obj.managed_policies[self.arn] = self def detach_from(self, obj): self.attachment_count -= 1 - del obj.managed_policies[self.name] + del obj.managed_policies[self.arn] + + @property + def arn(self): + return "arn:aws:iam::{0}:policy{1}{2}".format(ACCOUNT_ID, self.path, self.name) class AWSManagedPolicy(ManagedPolicy): @@ -91,7 +139,13 @@ class AWSManagedPolicy(ManagedPolicy): return cls(name, default_version_id=data.get('DefaultVersionId'), path=data.get('Path'), - document=data.get('Document')) + document=json.dumps(data.get('Document')), + create_date=datetime.strptime(data.get('CreateDate'), "%Y-%m-%dT%H:%M:%S+00:00"), + update_date=datetime.strptime(data.get('UpdateDate'), "%Y-%m-%dT%H:%M:%S+00:00")) + + @property + def arn(self): + return 'arn:aws:iam::aws:policy{0}{1}'.format(self.path, self.name) # AWS defines some of its own managed policies and we periodically @@ -107,13 +161,21 @@ class InlinePolicy(Policy): class Role(BaseModel): - def __init__(self, role_id, name, assume_role_policy_document, path): + def __init__(self, role_id, name, assume_role_policy_document, path, permissions_boundary): self.id = role_id self.name = name self.assume_role_policy_document = assume_role_policy_document - self.path = path + self.path = path or '/' self.policies = {} self.managed_policies = {} + self.create_date = datetime.utcnow() + self.tags = {} + self.description = "" + self.permissions_boundary = permissions_boundary + + @property + def 
created_iso_8601(self): + return iso_8601_datetime_with_milliseconds(self.create_date) @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): @@ -122,7 +184,8 @@ class Role(BaseModel): role = iam_backend.create_role( role_name=resource_name, assume_role_policy_document=properties['AssumeRolePolicyDocument'], - path=properties['Path'], + path=properties.get('Path', '/'), + permissions_boundary=properties.get('PermissionsBoundary', '') ) policies = properties.get('Policies', []) @@ -157,14 +220,22 @@ class Role(BaseModel): raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"') raise UnformattedGetAttTemplateException() + def get_tags(self): + return [self.tags[tag] for tag in self.tags] + class InstanceProfile(BaseModel): def __init__(self, instance_profile_id, name, path, roles): self.id = instance_profile_id self.name = name - self.path = path + self.path = path or '/' self.roles = roles if roles else [] + self.create_date = datetime.utcnow() + + @property + def created_iso_8601(self): + return iso_8601_datetime_with_milliseconds(self.create_date) @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): @@ -173,7 +244,7 @@ class InstanceProfile(BaseModel): role_ids = properties['Roles'] return iam_backend.create_instance_profile( name=resource_name, - path=properties['Path'], + path=properties.get('Path', '/'), role_ids=role_ids, ) @@ -188,7 +259,7 @@ class InstanceProfile(BaseModel): def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException if attribute_name == 'Arn': - raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"') + return self.arn raise UnformattedGetAttTemplateException() @@ -210,17 +281,37 @@ class Certificate(BaseModel): return "arn:aws:iam::{0}:server-certificate{1}{2}".format(ACCOUNT_ID, self.path, self.cert_name) +class SigningCertificate(BaseModel): + + def __init__(self, id, user_name, body): + self.id = id + self.user_name = user_name + self.body = body + self.upload_date = datetime.utcnow() + self.status = 'Active' + + @property + def uploaded_iso_8601(self): + return iso_8601_datetime_without_milliseconds(self.upload_date) + + class AccessKey(BaseModel): def __init__(self, user_name): self.user_name = user_name - self.access_key_id = random_access_key() - self.secret_access_key = random_alphanumeric(32) + self.access_key_id = "AKIA" + random_access_key() + self.secret_access_key = random_alphanumeric(40) self.status = 'Active' - self.create_date = datetime.strftime( - datetime.utcnow(), - "%Y-%m-%dT%H:%M:%SZ" - ) + self.create_date = datetime.utcnow() + self.last_used = datetime.utcnow() + + @property + def created_iso_8601(self): + return iso_8601_datetime_without_milliseconds(self.create_date) + + @property + def last_used_iso_8601(self): + return iso_8601_datetime_without_milliseconds(self.last_used) def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException @@ -235,15 +326,16 @@ class Group(BaseModel): self.name = name self.id = random_resource_id() self.path = path - self.created = datetime.strftime( - datetime.utcnow(), - "%Y-%m-%d-%H-%M-%S" - ) + self.create_date = datetime.utcnow() self.users = [] self.managed_policies = {} self.policies = {} + @property + def created_iso_8601(self): + return iso_8601_datetime_with_milliseconds(self.create_date) + def get_cfn_attribute(self, attribute_name): from 
moto.cloudformation.exceptions import UnformattedGetAttTemplateException if attribute_name == 'Arn': @@ -252,7 +344,11 @@ class Group(BaseModel): @property def arn(self): - return "arn:aws:iam::{0}:group/{1}".format(ACCOUNT_ID, self.path) + if self.path == '/': + return "arn:aws:iam::{0}:group/{1}".format(ACCOUNT_ID, self.name) + + else: + return "arn:aws:iam::{0}:group/{1}/{2}".format(ACCOUNT_ID, self.path, self.name) def get_policy(self, policy_name): try: @@ -279,13 +375,14 @@ class User(BaseModel): self.name = name self.id = random_resource_id() self.path = path if path else "/" - self.created = datetime.utcnow() + self.create_date = datetime.utcnow() self.mfa_devices = {} self.policies = {} self.managed_policies = {} self.access_keys = [] self.password = None self.password_reset_required = False + self.signing_certificates = {} @property def arn(self): @@ -293,7 +390,7 @@ class User(BaseModel): @property def created_iso_8601(self): - return iso_8601_datetime_without_milliseconds(self.created) + return iso_8601_datetime_with_milliseconds(self.create_date) def get_policy(self, policy_name): policy_json = None @@ -341,13 +438,20 @@ class User(BaseModel): return self.access_keys def delete_access_key(self, access_key_id): + key = self.get_access_key_by_id(access_key_id) + self.access_keys.remove(key) + + def update_access_key(self, access_key_id, status): + key = self.get_access_key_by_id(access_key_id) + key.status = status + + def get_access_key_by_id(self, access_key_id): for key in self.access_keys: if key.access_key_id == access_key_id: - self.access_keys.remove(key) - break + return key else: raise IAMNotFoundException( - "Key {0} not found".format(access_key_id)) + "The Access Key with id {0} cannot be found".format(access_key_id)) def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException @@ -357,7 +461,7 @@ class User(BaseModel): def to_csv(self): date_format = '%Y-%m-%dT%H:%M:%S+00:00' - date_created = self.created + date_created = self.create_date # aagrawal,arn:aws:iam::509284790694:user/aagrawal,2014-09-01T22:28:48+00:00,true,2014-11-12T23:36:49+00:00,2014-09-03T18:59:00+00:00,N/A,false,true,2014-09-01T22:28:48+00:00,false,N/A,false,N/A,false,N/A if not self.password: password_enabled = 'false' @@ -408,16 +512,29 @@ class IAMBackend(BaseBackend): self.credential_report = None self.managed_policies = self._init_managed_policies() self.account_aliases = [] + self.saml_providers = {} + self.policy_arn_regex = re.compile( + r'^arn:aws:iam::[0-9]*:policy/.*$') super(IAMBackend, self).__init__() def _init_managed_policies(self): - return dict((p.name, p) for p in aws_managed_policies) + return dict((p.arn, p) for p in aws_managed_policies) def attach_role_policy(self, policy_arn, role_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) policy = arns[policy_arn] policy.attach_to(self.get_role(role_name)) + def update_role_description(self, role_name, role_description): + role = self.get_role(role_name) + role.description = role_description + return role + + def update_role(self, role_name, role_description): + role = self.get_role(role_name) + role.description = role_description + return role + def detach_role_policy(self, policy_arn, role_name): arns = dict((p.arn, p) for p in self.managed_policies.values()) try: @@ -459,17 +576,22 @@ class IAMBackend(BaseBackend): policy.detach_from(self.get_user(user_name)) def create_policy(self, description, path, policy_document, policy_name): + 
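+        # The document is validated before the ManagedPolicy is constructed, so a
+        # malformed document raises MalformedPolicyDocument (see
+        # moto/iam/policy_validation.py below) and nothing is added to
+        # self.managed_policies.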
iam_policy_document_validator = IAMPolicyDocumentValidator(policy_document) + iam_policy_document_validator.validate() + policy = ManagedPolicy( policy_name, description=description, document=policy_document, path=path, ) - self.managed_policies[policy.name] = policy + self.managed_policies[policy.arn] = policy return policy - def get_policy(self, policy_name): - return self.managed_policies.get(policy_name) + def get_policy(self, policy_arn): + if policy_arn not in self.managed_policies: + raise IAMNotFoundException("Policy {0} not found".format(policy_arn)) + return self.managed_policies.get(policy_arn) def list_attached_role_policies(self, role_name, marker=None, max_items=100, path_prefix='/'): policies = self.get_role(role_name).managed_policies.values() @@ -513,9 +635,12 @@ class IAMBackend(BaseBackend): return policies, marker - def create_role(self, role_name, assume_role_policy_document, path): + def create_role(self, role_name, assume_role_policy_document, path, permissions_boundary): role_id = random_resource_id() - role = Role(role_id, role_name, assume_role_policy_document, path) + if permissions_boundary and not self.policy_arn_regex.match(permissions_boundary): + raise RESTError('InvalidParameterValue', 'Value ({}) for parameter PermissionsBoundary is invalid.'.format(permissions_boundary)) + + role = Role(role_id, role_name, assume_role_policy_document, path, permissions_boundary) self.roles[role_id] = role return role @@ -546,6 +671,9 @@ class IAMBackend(BaseBackend): def put_role_policy(self, role_name, policy_name, policy_json): role = self.get_role(role_name) + + iam_policy_document_validator = IAMPolicyDocumentValidator(policy_json) + iam_policy_document_validator.validate() role.put_policy(policy_name, policy_json) def delete_role_policy(self, role_name, policy_name): @@ -557,27 +685,112 @@ class IAMBackend(BaseBackend): for p, d in role.policies.items(): if p == policy_name: return p, d + raise IAMNotFoundException("Policy Document {0} not attached to role {1}".format(policy_name, role_name)) def list_role_policies(self, role_name): role = self.get_role(role_name) return role.policies.keys() + def _validate_tag_key(self, tag_key, exception_param='tags.X.member.key'): + """Validates the tag key. + + :param all_tags: Dict to check if there is a duplicate tag. + :param tag_key: The tag key to check against. + :param exception_param: The exception parameter to send over to help format the message. This is to reflect + the difference between the tag and untag APIs. + :return: + """ + # Validate that the key length is correct: + if len(tag_key) > 128: + raise TagKeyTooBig(tag_key, param=exception_param) + + # Validate that the tag key fits the proper Regex: + # [\w\s_.:/=+\-@]+ SHOULD be the same as the Java regex on the AWS documentation: [\p{L}\p{Z}\p{N}_.:/=+\-@]+ + match = re.findall(r'[\w\s_.:/=+\-@]+', tag_key) + # Kudos if you can come up with a better way of doing a global search :) + if not len(match) or len(match[0]) < len(tag_key): + raise InvalidTagCharacters(tag_key, param=exception_param) + + def _check_tag_duplicate(self, all_tags, tag_key): + """Validates that a tag key is not a duplicate + + :param all_tags: Dict to check if there is a duplicate tag. + :param tag_key: The tag key to check against. 
+ :return: + """ + if tag_key in all_tags: + raise DuplicateTags() + + def list_role_tags(self, role_name, marker, max_items=100): + role = self.get_role(role_name) + + max_items = int(max_items) + tag_index = sorted(role.tags) + start_idx = int(marker) if marker else 0 + + tag_index = tag_index[start_idx:start_idx + max_items] + + if len(role.tags) <= (start_idx + max_items): + marker = None + else: + marker = str(start_idx + max_items) + + # Make the tag list of dict's: + tags = [role.tags[tag] for tag in tag_index] + + return tags, marker + + def tag_role(self, role_name, tags): + if len(tags) > 50: + raise TooManyTags(tags) + + role = self.get_role(role_name) + + tag_keys = {} + for tag in tags: + # Need to index by the lowercase tag key since the keys are case insensitive, but their case is retained. + ref_key = tag['Key'].lower() + self._check_tag_duplicate(tag_keys, ref_key) + self._validate_tag_key(tag['Key']) + if len(tag['Value']) > 256: + raise TagValueTooBig(tag['Value']) + + tag_keys[ref_key] = tag + + role.tags.update(tag_keys) + + def untag_role(self, role_name, tag_keys): + if len(tag_keys) > 50: + raise TooManyTags(tag_keys, param='tagKeys') + + role = self.get_role(role_name) + + for key in tag_keys: + ref_key = key.lower() + self._validate_tag_key(key, exception_param='tagKeys') + + role.tags.pop(ref_key, None) + def create_policy_version(self, policy_arn, policy_document, set_as_default): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + iam_policy_document_validator = IAMPolicyDocumentValidator(policy_document) + iam_policy_document_validator.validate() + + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") + if len(policy.versions) >= 5: + raise IAMLimitExceededException("A managed policy can have up to 5 versions. 
Before you create a new version, you must delete an existing version.") + set_as_default = (set_as_default == "true") # convert it to python bool version = PolicyVersion(policy_arn, policy_document, set_as_default) policy.versions.append(version) + version.version_id = 'v{0}'.format(policy.next_version_num) + policy.next_version_num += 1 if set_as_default: - policy.default_version_id = version.version_id + policy.update_default_version(version.version_id) return version def get_policy_version(self, policy_arn, version_id): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") for version in policy.versions: @@ -586,19 +799,18 @@ class IAMBackend(BaseBackend): raise IAMNotFoundException("Policy version not found") def list_policy_versions(self, policy_arn): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") return policy.versions def delete_policy_version(self, policy_arn, version_id): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") + if version_id == policy.default_version_id: + raise IAMConflictException(code="DeleteConflict", + message="Cannot delete the default version of a policy.") for i, v in enumerate(policy.versions): if v.version_id == version_id: del policy.versions[i] @@ -663,6 +875,20 @@ class IAMBackend(BaseBackend): "The Server Certificate with name {0} cannot be " "found.".format(name)) + def delete_server_certificate(self, name): + cert_id = None + for key, cert in self.certificates.items(): + if name == cert.cert_name: + cert_id = key + break + + if cert_id is None: + raise IAMNotFoundException( + "The Server Certificate with name {0} cannot be " + "found.".format(name)) + + self.certificates.pop(cert_id, None) + def create_group(self, group_name, path='/'): if group_name in self.groups: raise IAMConflictException( @@ -696,6 +922,9 @@ class IAMBackend(BaseBackend): def put_group_policy(self, group_name, policy_name, policy_json): group = self.get_group(group_name) + + iam_policy_document_validator = IAMPolicyDocumentValidator(policy_json) + iam_policy_document_validator.validate() group.put_policy(policy_name, policy_json) def list_group_policies(self, group_name, marker=None, max_items=None): @@ -734,6 +963,70 @@ class IAMBackend(BaseBackend): return users + def update_user(self, user_name, new_path=None, new_user_name=None): + try: + user = self.users[user_name] + except KeyError: + raise IAMNotFoundException("User {0} not found".format(user_name)) + + if new_path: + user.path = new_path + if new_user_name: + user.name = new_user_name + self.users[new_user_name] = self.users.pop(user_name) + + def list_roles(self, path_prefix, marker, max_items): + roles = None + try: + roles = self.roles.values() + except KeyError: + raise IAMNotFoundException( + "Users {0}, {1}, {2} not found".format(path_prefix, marker, max_items)) + + return roles + + def upload_signing_certificate(self, user_name, body): + user = self.get_user(user_name) + cert_id = random_resource_id(size=32) + + # Validate the signing cert: + try: + if sys.version_info < (3, 0): + data = bytes(body) + 
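+            # On Python 2 the body is already a byte string, so bytes(body) suffices;
+            # the Python 3 branch below must encode it first, because
+            # x509.load_pem_x509_certificate only accepts bytes.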
else: + data = bytes(body, 'utf8') + + x509.load_pem_x509_certificate(data, default_backend()) + + except Exception: + raise MalformedCertificate(body) + + user.signing_certificates[cert_id] = SigningCertificate(cert_id, user_name, body) + + return user.signing_certificates[cert_id] + + def delete_signing_certificate(self, user_name, cert_id): + user = self.get_user(user_name) + + try: + del user.signing_certificates[cert_id] + except KeyError: + raise IAMNotFoundException("The Certificate with id {id} cannot be found.".format(id=cert_id)) + + def list_signing_certificates(self, user_name): + user = self.get_user(user_name) + + return list(user.signing_certificates.values()) + + def update_signing_certificate(self, user_name, cert_id, status): + user = self.get_user(user_name) + + try: + user.signing_certificates[cert_id].status = status + + except KeyError: + raise IAMNotFoundException("The Certificate with id {id} cannot be found.".format(id=cert_id)) + def create_login_profile(self, user_name, password): # This does not currently deal with PasswordPolicyViolation. user = self.get_user(user_name) @@ -792,6 +1085,9 @@ class IAMBackend(BaseBackend): def put_user_policy(self, user_name, policy_name, policy_json): user = self.get_user(user_name) + + iam_policy_document_validator = IAMPolicyDocumentValidator(policy_json) + iam_policy_document_validator.validate() user.put_policy(policy_name, policy_json) def delete_user_policy(self, user_name, policy_name): @@ -803,6 +1099,28 @@ class IAMBackend(BaseBackend): key = user.create_access_key() return key + def update_access_key(self, user_name, access_key_id, status): + user = self.get_user(user_name) + user.update_access_key(access_key_id, status) + + def get_access_key_last_used(self, access_key_id): + access_keys_list = self.get_all_access_keys_for_all_users() + for key in access_keys_list: + if key.access_key_id == access_key_id: + return { + 'user_name': key.user_name, + 'last_used': key.last_used_iso_8601, + } + else: + raise IAMNotFoundException( + "The Access Key with id {0} cannot be found".format(access_key_id)) + + def get_all_access_keys_for_all_users(self): + access_keys_list = [] + for user_name in self.users: + access_keys_list += self.get_all_access_keys(user_name) + return access_keys_list + def get_all_access_keys(self, user_name, marker=None, max_items=None): user = self.get_user(user_name) keys = user.get_all_access_keys() @@ -875,5 +1193,60 @@ class IAMBackend(BaseBackend): def delete_account_alias(self, alias): self.account_aliases = [] + def get_account_authorization_details(self, filter): + policies = self.managed_policies.values() + local_policies = set(policies) - set(aws_managed_policies) + returned_policies = [] + + if len(filter) == 0: + return { + 'instance_profiles': self.instance_profiles.values(), + 'roles': self.roles.values(), + 'groups': self.groups.values(), + 'users': self.users.values(), + 'managed_policies': self.managed_policies.values() + } + + if 'AWSManagedPolicy' in filter: + returned_policies = aws_managed_policies + if 'LocalManagedPolicy' in filter: + returned_policies = returned_policies + list(local_policies) + + return { + 'instance_profiles': self.instance_profiles.values(), + 'roles': self.roles.values() if 'Role' in filter else [], + 'groups': self.groups.values() if 'Group' in filter else [], + 'users': self.users.values() if 'User' in filter else [], + 'managed_policies': returned_policies + } + + def create_saml_provider(self, name, saml_metadata_document): + saml_provider = 
SAMLProvider(name, saml_metadata_document) + self.saml_providers[name] = saml_provider + return saml_provider + + def update_saml_provider(self, saml_provider_arn, saml_metadata_document): + saml_provider = self.get_saml_provider(saml_provider_arn) + saml_provider.saml_metadata_document = saml_metadata_document + return saml_provider + + def delete_saml_provider(self, saml_provider_arn): + try: + for saml_provider in list(self.list_saml_providers()): + if saml_provider.arn == saml_provider_arn: + del self.saml_providers[saml_provider.name] + except KeyError: + raise IAMNotFoundException( + "SAMLProvider {0} not found".format(saml_provider_arn)) + + def list_saml_providers(self): + return self.saml_providers.values() + + def get_saml_provider(self, saml_provider_arn): + for saml_provider in self.list_saml_providers(): + if saml_provider.arn == saml_provider_arn: + return saml_provider + raise IAMNotFoundException("SamlProvider {0} not found".format(saml_provider_arn)) + iam_backend = IAMBackend() diff --git a/moto/iam/policy_validation.py b/moto/iam/policy_validation.py new file mode 100644 index 000000000..6ee286072 --- /dev/null +++ b/moto/iam/policy_validation.py @@ -0,0 +1,450 @@ +import json +import re + +from six import string_types + +from moto.iam.exceptions import MalformedPolicyDocument + + +VALID_TOP_ELEMENTS = [ + "Version", + "Id", + "Statement", + "Conditions" +] + +VALID_VERSIONS = [ + "2008-10-17", + "2012-10-17" +] + +VALID_STATEMENT_ELEMENTS = [ + "Sid", + "Action", + "NotAction", + "Resource", + "NotResource", + "Effect", + "Condition" +] + +VALID_EFFECTS = [ + "Allow", + "Deny" +] + +VALID_CONDITIONS = [ + "StringEquals", + "StringNotEquals", + "StringEqualsIgnoreCase", + "StringNotEqualsIgnoreCase", + "StringLike", + "StringNotLike", + "NumericEquals", + "NumericNotEquals", + "NumericLessThan", + "NumericLessThanEquals", + "NumericGreaterThan", + "NumericGreaterThanEquals", + "DateEquals", + "DateNotEquals", + "DateLessThan", + "DateLessThanEquals", + "DateGreaterThan", + "DateGreaterThanEquals", + "Bool", + "BinaryEquals", + "IpAddress", + "NotIpAddress", + "ArnEquals", + "ArnLike", + "ArnNotEquals", + "ArnNotLike", + "Null" +] + +VALID_CONDITION_PREFIXES = [ + "ForAnyValue:", + "ForAllValues:" +] + +VALID_CONDITION_POSTFIXES = [ + "IfExists" +] + +SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS = { + "iam": 'IAM resource {resource} cannot contain region information.', + "s3": 'Resource {resource} can not contain region information.' +} + +VALID_RESOURCE_PATH_STARTING_VALUES = { + "iam": { + "values": ["user/", "federated-user/", "role/", "group/", "instance-profile/", "mfa/", "server-certificate/", + "policy/", "sms-mfa/", "saml-provider/", "oidc-provider/", "report/", "access-report/"], + "error_message": 'IAM resource path must either be "*" or start with {values}.' 
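# A minimal sketch of a document that passes every check the validator below
# performs: a supported Version, one statement with a valid Effect, a
# vendor-prefixed action, and an ARN-format resource. The statement contents
# (Sid, bucket name) are illustrative assumptions, not taken from this change.
import json

minimal_policy = json.dumps({
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowBucketList",            # optional; must be unique per policy
            "Effect": "Allow",                    # matched case-sensitively in legacy parsing
            "Action": "s3:ListBucket",            # "vendor:action" with at most one colon
            "Resource": "arn:aws:s3:::my-bucket"  # s3 ARNs carry an empty region field
        }
    ]
})
# IAMPolicyDocumentValidator(minimal_policy).validate() accepts this document;
# removing "Resource" would instead raise
# MalformedPolicyDocument("Policy statement must contain resources.").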
+ } +} + + +class IAMPolicyDocumentValidator: + + def __init__(self, policy_document): + self._policy_document = policy_document + self._policy_json = {} + self._statements = [] + self._resource_error = "" # the first resource error found that does not generate a legacy parsing error + + def validate(self): + try: + self._validate_syntax() + except Exception: + raise MalformedPolicyDocument("Syntax errors in policy.") + try: + self._validate_version() + except Exception: + raise MalformedPolicyDocument("Policy document must be version 2012-10-17 or greater.") + try: + self._perform_first_legacy_parsing() + self._validate_resources_for_formats() + self._validate_not_resources_for_formats() + except Exception: + raise MalformedPolicyDocument("The policy failed legacy parsing") + try: + self._validate_sid_uniqueness() + except Exception: + raise MalformedPolicyDocument("Statement IDs (SID) in a single policy must be unique.") + try: + self._validate_action_like_exist() + except Exception: + raise MalformedPolicyDocument("Policy statement must contain actions.") + try: + self._validate_resource_exist() + except Exception: + raise MalformedPolicyDocument("Policy statement must contain resources.") + + if self._resource_error != "": + raise MalformedPolicyDocument(self._resource_error) + + self._validate_actions_for_prefixes() + self._validate_not_actions_for_prefixes() + + def _validate_syntax(self): + self._policy_json = json.loads(self._policy_document) + assert isinstance(self._policy_json, dict) + self._validate_top_elements() + self._validate_version_syntax() + self._validate_id_syntax() + self._validate_statements_syntax() + + def _validate_top_elements(self): + top_elements = self._policy_json.keys() + for element in top_elements: + assert element in VALID_TOP_ELEMENTS + + def _validate_version_syntax(self): + if "Version" in self._policy_json: + assert self._policy_json["Version"] in VALID_VERSIONS + + def _validate_version(self): + assert self._policy_json["Version"] == "2012-10-17" + + def _validate_sid_uniqueness(self): + sids = [] + for statement in self._statements: + if "Sid" in statement: + assert statement["Sid"] not in sids + sids.append(statement["Sid"]) + + def _validate_statements_syntax(self): + assert "Statement" in self._policy_json + assert isinstance(self._policy_json["Statement"], (dict, list)) + + if isinstance(self._policy_json["Statement"], dict): + self._statements.append(self._policy_json["Statement"]) + else: + self._statements += self._policy_json["Statement"] + + assert self._statements + for statement in self._statements: + self._validate_statement_syntax(statement) + + @staticmethod + def _validate_statement_syntax(statement): + assert isinstance(statement, dict) + for statement_element in statement.keys(): + assert statement_element in VALID_STATEMENT_ELEMENTS + + assert ("Resource" not in statement or "NotResource" not in statement) + assert ("Action" not in statement or "NotAction" not in statement) + + IAMPolicyDocumentValidator._validate_effect_syntax(statement) + IAMPolicyDocumentValidator._validate_action_syntax(statement) + IAMPolicyDocumentValidator._validate_not_action_syntax(statement) + IAMPolicyDocumentValidator._validate_resource_syntax(statement) + IAMPolicyDocumentValidator._validate_not_resource_syntax(statement) + IAMPolicyDocumentValidator._validate_condition_syntax(statement) + IAMPolicyDocumentValidator._validate_sid_syntax(statement) + + @staticmethod + def _validate_effect_syntax(statement): + assert "Effect" in statement + assert 
isinstance(statement["Effect"], string_types) + assert statement["Effect"].lower() in [allowed_effect.lower() for allowed_effect in VALID_EFFECTS] + + @staticmethod + def _validate_action_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "Action") + + @staticmethod + def _validate_not_action_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "NotAction") + + @staticmethod + def _validate_resource_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "Resource") + + @staticmethod + def _validate_not_resource_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "NotResource") + + @staticmethod + def _validate_string_or_list_of_strings_syntax(statement, key): + if key in statement: + assert isinstance(statement[key], (string_types, list)) + if isinstance(statement[key], list): + for resource in statement[key]: + assert isinstance(resource, string_types) + + @staticmethod + def _validate_condition_syntax(statement): + if "Condition" in statement: + assert isinstance(statement["Condition"], dict) + for condition_key, condition_value in statement["Condition"].items(): + assert isinstance(condition_value, dict) + for condition_element_key, condition_element_value in condition_value.items(): + assert isinstance(condition_element_value, (list, string_types)) + + if IAMPolicyDocumentValidator._strip_condition_key(condition_key) not in VALID_CONDITIONS: + assert not condition_value # empty dict + + @staticmethod + def _strip_condition_key(condition_key): + for valid_prefix in VALID_CONDITION_PREFIXES: + if condition_key.startswith(valid_prefix): + condition_key = condition_key[len(valid_prefix):] + break # strip only the first match + + for valid_postfix in VALID_CONDITION_POSTFIXES: + if condition_key.endswith(valid_postfix): + condition_key = condition_key[:-len(valid_postfix)] + break # strip only the first match + + return condition_key + + @staticmethod + def _validate_sid_syntax(statement): + if "Sid" in statement: + assert isinstance(statement["Sid"], string_types) + + def _validate_id_syntax(self): + if "Id" in self._policy_json: + assert isinstance(self._policy_json["Id"], string_types) + + def _validate_resource_exist(self): + for statement in self._statements: + assert ("Resource" in statement or "NotResource" in statement) + if "Resource" in statement and isinstance(statement["Resource"], list): + assert statement["Resource"] + elif "NotResource" in statement and isinstance(statement["NotResource"], list): + assert statement["NotResource"] + + def _validate_action_like_exist(self): + for statement in self._statements: + assert ("Action" in statement or "NotAction" in statement) + if "Action" in statement and isinstance(statement["Action"], list): + assert statement["Action"] + elif "NotAction" in statement and isinstance(statement["NotAction"], list): + assert statement["NotAction"] + + def _validate_actions_for_prefixes(self): + self._validate_action_like_for_prefixes("Action") + + def _validate_not_actions_for_prefixes(self): + self._validate_action_like_for_prefixes("NotAction") + + def _validate_action_like_for_prefixes(self, key): + for statement in self._statements: + if key in statement: + if isinstance(statement[key], string_types): + self._validate_action_prefix(statement[key]) + else: + for action in statement[key]: + self._validate_action_prefix(action) + + @staticmethod + def 
_validate_action_prefix(action): + action_parts = action.split(":") + if len(action_parts) == 1 and action_parts[0] != "*": + raise MalformedPolicyDocument("Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.") + elif len(action_parts) > 2: + raise MalformedPolicyDocument("Actions/Condition can contain only one colon.") + + vendor_pattern = re.compile(r'[^a-zA-Z0-9\-.]') + if action_parts[0] != "*" and vendor_pattern.search(action_parts[0]): + raise MalformedPolicyDocument("Vendor {vendor} is not valid".format(vendor=action_parts[0])) + + def _validate_resources_for_formats(self): + self._validate_resource_like_for_formats("Resource") + + def _validate_not_resources_for_formats(self): + self._validate_resource_like_for_formats("NotResource") + + def _validate_resource_like_for_formats(self, key): + for statement in self._statements: + if key in statement: + if isinstance(statement[key], string_types): + self._validate_resource_format(statement[key]) + else: + for resource in sorted(statement[key], reverse=True): + self._validate_resource_format(resource) + if self._resource_error == "": + IAMPolicyDocumentValidator._legacy_parse_resource_like(statement, key) + + def _validate_resource_format(self, resource): + if resource != "*": + resource_partitions = resource.partition(":") + + if resource_partitions[1] == "": + self._resource_error = 'Resource {resource} must be in ARN format or "*".'.format(resource=resource) + return + + resource_partitions = resource_partitions[2].partition(":") + if resource_partitions[0] != "aws": + remaining_resource_parts = resource_partitions[2].split(":") + + arn1 = remaining_resource_parts[0] if remaining_resource_parts[0] != "" or len(remaining_resource_parts) > 1 else "*" + arn2 = remaining_resource_parts[1] if len(remaining_resource_parts) > 1 else "*" + arn3 = remaining_resource_parts[2] if len(remaining_resource_parts) > 2 else "*" + arn4 = ":".join(remaining_resource_parts[3:]) if len(remaining_resource_parts) > 3 else "*" + self._resource_error = 'Partition "{partition}" is not valid for resource "arn:{partition}:{arn1}:{arn2}:{arn3}:{arn4}".'.format( + partition=resource_partitions[0], + arn1=arn1, + arn2=arn2, + arn3=arn3, + arn4=arn4 + ) + return + + if resource_partitions[1] != ":": + self._resource_error = "Resource vendor must be fully qualified and cannot contain regexes." 
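+            # The chain of partition(":") calls below peels the ARN apart one
+            # field at a time (arn:<partition>:<service>:<region>:<account>:<resource>),
+            # so after the final call resource_partitions[2] holds only the resource
+            # path, which is then checked against VALID_RESOURCE_PATH_STARTING_VALUES.
+            # For iam and s3 the region field must be empty, which is why the service
+            # check looks for a leading ":" in the remainder.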
+ return + + resource_partitions = resource_partitions[2].partition(":") + + service = resource_partitions[0] + + if service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys() and not resource_partitions[2].startswith(":"): + self._resource_error = SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[service].format(resource=resource) + return + + resource_partitions = resource_partitions[2].partition(":") + resource_partitions = resource_partitions[2].partition(":") + + if service in VALID_RESOURCE_PATH_STARTING_VALUES.keys(): + valid_start = False + for valid_starting_value in VALID_RESOURCE_PATH_STARTING_VALUES[service]["values"]: + if resource_partitions[2].startswith(valid_starting_value): + valid_start = True + break + if not valid_start: + self._resource_error = VALID_RESOURCE_PATH_STARTING_VALUES[service]["error_message"].format( + values=", ".join(VALID_RESOURCE_PATH_STARTING_VALUES[service]["values"]) + ) + + def _perform_first_legacy_parsing(self): + """This method excludes legacy parsing resources, since that have to be done later.""" + for statement in self._statements: + self._legacy_parse_statement(statement) + + @staticmethod + def _legacy_parse_statement(statement): + assert statement["Effect"] in VALID_EFFECTS # case-sensitive matching + if "Condition" in statement: + for condition_key, condition_value in statement["Condition"].items(): + IAMPolicyDocumentValidator._legacy_parse_condition(condition_key, condition_value) + + @staticmethod + def _legacy_parse_resource_like(statement, key): + if isinstance(statement[key], string_types): + if statement[key] != "*": + assert statement[key].count(":") >= 5 or "::" not in statement[key] + assert statement[key].split(":")[2] != "" + else: # list + for resource in statement[key]: + if resource != "*": + assert resource.count(":") >= 5 or "::" not in resource + assert resource[2] != "" + + @staticmethod + def _legacy_parse_condition(condition_key, condition_value): + stripped_condition_key = IAMPolicyDocumentValidator._strip_condition_key(condition_key) + + if stripped_condition_key.startswith("Date"): + for condition_element_key, condition_element_value in condition_value.items(): + if isinstance(condition_element_value, string_types): + IAMPolicyDocumentValidator._legacy_parse_date_condition_value(condition_element_value) + else: # it has to be a list + for date_condition_value in condition_element_value: + IAMPolicyDocumentValidator._legacy_parse_date_condition_value(date_condition_value) + + @staticmethod + def _legacy_parse_date_condition_value(date_condition_value): + if "t" in date_condition_value.lower() or "-" in date_condition_value: + IAMPolicyDocumentValidator._validate_iso_8601_datetime(date_condition_value.lower()) + else: # timestamp + assert 0 <= int(date_condition_value) <= 9223372036854775807 + + @staticmethod + def _validate_iso_8601_datetime(datetime): + datetime_parts = datetime.partition("t") + negative_year = datetime_parts[0].startswith("-") + date_parts = datetime_parts[0][1:].split("-") if negative_year else datetime_parts[0].split("-") + year = "-" + date_parts[0] if negative_year else date_parts[0] + assert -292275054 <= int(year) <= 292278993 + if len(date_parts) > 1: + month = date_parts[1] + assert 1 <= int(month) <= 12 + if len(date_parts) > 2: + day = date_parts[2] + assert 1 <= int(day) <= 31 + assert len(date_parts) < 4 + + time_parts = datetime_parts[2].split(":") + if time_parts[0] != "": + hours = time_parts[0] + assert 0 <= int(hours) <= 23 + if len(time_parts) > 1: + minutes = 
time_parts[1] + assert 0 <= int(minutes) <= 59 + if len(time_parts) > 2: + if "z" in time_parts[2]: + seconds_with_decimal_fraction = time_parts[2].partition("z")[0] + assert time_parts[2].partition("z")[2] == "" + elif "+" in time_parts[2]: + seconds_with_decimal_fraction = time_parts[2].partition("+")[0] + time_zone_data = time_parts[2].partition("+")[2].partition(":") + time_zone_hours = time_zone_data[0] + assert len(time_zone_hours) == 2 + assert 0 <= int(time_zone_hours) <= 23 + if time_zone_data[1] == ":": + time_zone_minutes = time_zone_data[2] + assert len(time_zone_minutes) == 2 + assert 0 <= int(time_zone_minutes) <= 59 + else: + seconds_with_decimal_fraction = time_parts[2] + seconds_with_decimal_fraction_partition = seconds_with_decimal_fraction.partition(".") + seconds = seconds_with_decimal_fraction_partition[0] + assert 0 <= int(seconds) <= 59 + if seconds_with_decimal_fraction_partition[1] == ".": + decimal_seconds = seconds_with_decimal_fraction_partition[2] + assert 0 <= int(decimal_seconds) <= 999999999 diff --git a/moto/iam/responses.py b/moto/iam/responses.py index df32732a0..3ee9c3762 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -58,6 +58,12 @@ class IamResponse(BaseResponse): template = self.response_template(CREATE_POLICY_TEMPLATE) return template.render(policy=policy) + def get_policy(self): + policy_arn = self._get_param('PolicyArn') + policy = iam_backend.get_policy(policy_arn) + template = self.response_template(GET_POLICY_TEMPLATE) + return template.render(policy=policy) + def list_attached_role_policies(self): marker = self._get_param('Marker') max_items = self._get_int_param('MaxItems', 100) @@ -101,14 +107,79 @@ class IamResponse(BaseResponse): template = self.response_template(LIST_POLICIES_TEMPLATE) return template.render(policies=policies, marker=marker) + def list_entities_for_policy(self): + policy_arn = self._get_param('PolicyArn') + + # Options 'User'|'Role'|'Group'|'LocalManagedPolicy'|'AWSManagedPolicy + entity = self._get_param('EntityFilter') + path_prefix = self._get_param('PathPrefix') + # policy_usage_filter = self._get_param('PolicyUsageFilter') + marker = self._get_param('Marker') + max_items = self._get_param('MaxItems') + + entity_roles = [] + entity_groups = [] + entity_users = [] + + if entity == 'User': + users = iam_backend.list_users(path_prefix, marker, max_items) + if users: + for user in users: + for p in user.managed_policies: + if p == policy_arn: + entity_users.append(user.name) + + elif entity == 'Role': + roles = iam_backend.list_roles(path_prefix, marker, max_items) + if roles: + for role in roles: + for p in role.managed_policies: + if p == policy_arn: + entity_roles.append(role.name) + + elif entity == 'Group': + groups = iam_backend.list_groups() + if groups: + for group in groups: + for p in group.managed_policies: + if p == policy_arn: + entity_groups.append(group.name) + + elif entity == 'LocalManagedPolicy' or entity == 'AWSManagedPolicy': + users = iam_backend.list_users(path_prefix, marker, max_items) + if users: + for user in users: + for p in user.managed_policies: + if p == policy_arn: + entity_users.append(user.name) + + roles = iam_backend.list_roles(path_prefix, marker, max_items) + if roles: + for role in roles: + for p in role.managed_policies: + if p == policy_arn: + entity_roles.append(role.name) + + groups = iam_backend.list_groups() + if groups: + for group in groups: + for p in group.managed_policies: + if p == policy_arn: + entity_groups.append(group.name) + + template = 
self.response_template(LIST_ENTITIES_FOR_POLICY_TEMPLATE) + return template.render(roles=entity_roles, users=entity_users, groups=entity_groups) + def create_role(self): role_name = self._get_param('RoleName') path = self._get_param('Path') assume_role_policy_document = self._get_param( 'AssumeRolePolicyDocument') + permissions_boundary = self._get_param( + 'PermissionsBoundary') role = iam_backend.create_role( - role_name, assume_role_policy_document, path) + role_name, assume_role_policy_document, path, permissions_boundary) template = self.response_template(CREATE_ROLE_TEMPLATE) return template.render(role=role) @@ -163,6 +234,20 @@ class IamResponse(BaseResponse): template = self.response_template(GENERIC_EMPTY_TEMPLATE) return template.render(name="UpdateAssumeRolePolicyResponse") + def update_role_description(self): + role_name = self._get_param('RoleName') + description = self._get_param('Description') + role = iam_backend.update_role_description(role_name, description) + template = self.response_template(UPDATE_ROLE_DESCRIPTION_TEMPLATE) + return template.render(role=role) + + def update_role(self): + role_name = self._get_param('RoleName') + description = self._get_param('Description') + role = iam_backend.update_role(role_name, description) + template = self.response_template(UPDATE_ROLE_TEMPLATE) + return template.render(role=role) + def create_policy_version(self): policy_arn = self._get_param('PolicyArn') policy_document = self._get_param('PolicyDocument') @@ -195,7 +280,7 @@ class IamResponse(BaseResponse): def create_instance_profile(self): profile_name = self._get_param('InstanceProfileName') - path = self._get_param('Path') + path = self._get_param('Path', '/') profile = iam_backend.create_instance_profile( profile_name, path, role_ids=[]) @@ -271,9 +356,15 @@ class IamResponse(BaseResponse): template = self.response_template(GET_SERVER_CERTIFICATE_TEMPLATE) return template.render(certificate=cert) + def delete_server_certificate(self): + cert_name = self._get_param('ServerCertificateName') + iam_backend.delete_server_certificate(cert_name) + template = self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name="DeleteServerCertificate") + def create_group(self): group_name = self._get_param('GroupName') - path = self._get_param('Path') + path = self._get_param('Path', '/') group = iam_backend.create_group(group_name, path) template = self.response_template(CREATE_GROUP_TEMPLATE) @@ -351,6 +442,18 @@ class IamResponse(BaseResponse): template = self.response_template(LIST_USERS_TEMPLATE) return template.render(action='List', users=users) + def update_user(self): + user_name = self._get_param('UserName') + new_path = self._get_param('NewPath') + new_user_name = self._get_param('NewUserName') + iam_backend.update_user(user_name, new_path, new_user_name) + if new_user_name: + user = iam_backend.get_user(new_user_name) + else: + user = iam_backend.get_user(user_name) + template = self.response_template(USER_TEMPLATE) + return template.render(action='Update', user=user) + def create_login_profile(self): user_name = self._get_param('UserName') password = self._get_param('Password') @@ -401,7 +504,7 @@ class IamResponse(BaseResponse): return template.render( user_name=user_name, policy_name=policy_name, - policy_document=policy_document + policy_document=policy_document.get('policy_document') ) def list_user_policies(self): @@ -434,9 +537,22 @@ class IamResponse(BaseResponse): template = self.response_template(CREATE_ACCESS_KEY_TEMPLATE) return 
template.render(key=key) + def update_access_key(self): + user_name = self._get_param('UserName') + access_key_id = self._get_param('AccessKeyId') + status = self._get_param('Status') + iam_backend.update_access_key(user_name, access_key_id, status) + template = self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name='UpdateAccessKey') + + def get_access_key_last_used(self): + access_key_id = self._get_param('AccessKeyId') + last_used_response = iam_backend.get_access_key_last_used(access_key_id) + template = self.response_template(GET_ACCESS_KEY_LAST_USED_TEMPLATE) + return template.render(user_name=last_used_response["user_name"], last_used=last_used_response["last_used"]) + def list_access_keys(self): user_name = self._get_param('UserName') - keys = iam_backend.get_all_access_keys(user_name) template = self.response_template(LIST_ACCESS_KEYS_TEMPLATE) return template.render(user_name=user_name, keys=keys) @@ -520,6 +636,146 @@ class IamResponse(BaseResponse): template = self.response_template(DELETE_ACCOUNT_ALIAS_TEMPLATE) return template.render() + def get_account_authorization_details(self): + filter_param = self._get_multi_param('Filter.member') + account_details = iam_backend.get_account_authorization_details(filter_param) + template = self.response_template(GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE) + return template.render( + instance_profiles=account_details['instance_profiles'], + policies=account_details['managed_policies'], + users=account_details['users'], + groups=account_details['groups'], + roles=account_details['roles'], + get_groups_for_user=iam_backend.get_groups_for_user + ) + + def create_saml_provider(self): + saml_provider_name = self._get_param('Name') + saml_metadata_document = self._get_param('SAMLMetadataDocument') + saml_provider = iam_backend.create_saml_provider(saml_provider_name, saml_metadata_document) + + template = self.response_template(CREATE_SAML_PROVIDER_TEMPLATE) + return template.render(saml_provider=saml_provider) + + def update_saml_provider(self): + saml_provider_arn = self._get_param('SAMLProviderArn') + saml_metadata_document = self._get_param('SAMLMetadataDocument') + saml_provider = iam_backend.update_saml_provider(saml_provider_arn, saml_metadata_document) + + template = self.response_template(UPDATE_SAML_PROVIDER_TEMPLATE) + return template.render(saml_provider=saml_provider) + + def delete_saml_provider(self): + saml_provider_arn = self._get_param('SAMLProviderArn') + iam_backend.delete_saml_provider(saml_provider_arn) + + template = self.response_template(DELETE_SAML_PROVIDER_TEMPLATE) + return template.render() + + def list_saml_providers(self): + saml_providers = iam_backend.list_saml_providers() + + template = self.response_template(LIST_SAML_PROVIDERS_TEMPLATE) + return template.render(saml_providers=saml_providers) + + def get_saml_provider(self): + saml_provider_arn = self._get_param('SAMLProviderArn') + saml_provider = iam_backend.get_saml_provider(saml_provider_arn) + + template = self.response_template(GET_SAML_PROVIDER_TEMPLATE) + return template.render(saml_provider=saml_provider) + + def upload_signing_certificate(self): + user_name = self._get_param('UserName') + cert_body = self._get_param('CertificateBody') + + cert = iam_backend.upload_signing_certificate(user_name, cert_body) + template = self.response_template(UPLOAD_SIGNING_CERTIFICATE_TEMPLATE) + return template.render(cert=cert) + + def update_signing_certificate(self): + user_name = self._get_param('UserName') + cert_id = 
self._get_param('CertificateId') + status = self._get_param('Status') + + iam_backend.update_signing_certificate(user_name, cert_id, status) + template = self.response_template(UPDATE_SIGNING_CERTIFICATE_TEMPLATE) + return template.render() + + def delete_signing_certificate(self): + user_name = self._get_param('UserName') + cert_id = self._get_param('CertificateId') + + iam_backend.delete_signing_certificate(user_name, cert_id) + template = self.response_template(DELETE_SIGNING_CERTIFICATE_TEMPLATE) + return template.render() + + def list_signing_certificates(self): + user_name = self._get_param('UserName') + + certs = iam_backend.list_signing_certificates(user_name) + template = self.response_template(LIST_SIGNING_CERTIFICATES_TEMPLATE) + return template.render(user_name=user_name, certificates=certs) + + def list_role_tags(self): + role_name = self._get_param('RoleName') + marker = self._get_param('Marker') + max_items = self._get_param('MaxItems', 100) + + tags, marker = iam_backend.list_role_tags(role_name, marker, max_items) + + template = self.response_template(LIST_ROLE_TAG_TEMPLATE) + return template.render(tags=tags, marker=marker) + + def tag_role(self): + role_name = self._get_param('RoleName') + tags = self._get_multi_param('Tags.member') + + iam_backend.tag_role(role_name, tags) + + template = self.response_template(TAG_ROLE_TEMPLATE) + return template.render() + + def untag_role(self): + role_name = self._get_param('RoleName') + tag_keys = self._get_multi_param('TagKeys.member') + + iam_backend.untag_role(role_name, tag_keys) + + template = self.response_template(UNTAG_ROLE_TEMPLATE) + return template.render() + + +LIST_ENTITIES_FOR_POLICY_TEMPLATE = """ + + + {% for role in roles %} + + {{ role }} + + {% endfor %} + + + {% for group in groups %} + + {{ group }} + + {% endfor %} + + false + + {% for user in users %} + + {{ user }} + + {% endfor %} + + + + eb358e22-9d1f-11e4-93eb-190ecEXAMPLE + +""" + ATTACH_ROLE_POLICY_TEMPLATE = """ @@ -562,12 +818,12 @@ CREATE_POLICY_TEMPLATE = """ {{ policy.arn }} {{ policy.attachment_count }} - {{ policy.create_datetime.isoformat() }} + {{ policy.created_iso_8601 }} {{ policy.default_version_id }} {{ policy.path }} {{ policy.id }} {{ policy.name }} - {{ policy.update_datetime.isoformat() }} + {{ policy.updated_iso_8601 }} @@ -575,6 +831,25 @@ CREATE_POLICY_TEMPLATE = """ """ +GET_POLICY_TEMPLATE = """ + + + {{ policy.name }} + {{ policy.description }} + {{ policy.default_version_id }} + {{ policy.id }} + {{ policy.path }} + {{ policy.arn }} + {{ policy.attachment_count }} + {{ policy.created_iso_8601 }} + {{ policy.updated_iso_8601 }} + + + + 684f0917-3d22-11e4-a4a0-cffb9EXAMPLE + +""" + LIST_ATTACHED_ROLE_POLICIES_TEMPLATE = """ {% if marker is none %} @@ -654,12 +929,12 @@ LIST_POLICIES_TEMPLATE = """ {{ policy.arn }} {{ policy.attachment_count }} - {{ policy.create_datetime.isoformat() }} + {{ policy.created_iso_8601 }} {{ policy.default_version_id }} {{ policy.path }} {{ policy.id }} {{ policy.name }} - {{ policy.update_datetime.isoformat() }} + {{ policy.updated_iso_8601 }} {% endfor %} @@ -683,7 +958,7 @@ CREATE_INSTANCE_PROFILE_TEMPLATE = """ + + + + df37e965-9967-11e1-a4c3-270EXAMPLE04 + +""" + +UPDATE_ROLE_DESCRIPTION_TEMPLATE = """ + + + {{ role.path }} + {{ role.arn }} + {{ role.name }} + {{ role.assume_role_policy_document }} + {{ role.created_iso_8601 }} + {{ role.id }} + {% if role.tags %} + + {% for tag in role.get_tags() %} + + {{ tag['Key'] }} + {{ tag['Value'] }} + + {% endfor %} + + {% endif %} + + + + 
df37e965-9967-11e1-a4c3-270EXAMPLE04 + +""" + GET_ROLE_TEMPLATE = """ @@ -752,8 +1067,18 @@ GET_ROLE_TEMPLATE = """ {{ group.name }} {{ group.id }} {{ group.arn }} + {{ group.created_iso_8601 }} @@ -970,6 +1302,7 @@ GET_GROUP_TEMPLATE = """ {{ group.name }} {{ group.id }} {{ group.arn }} + {{ group.created_iso_8601 }} {% for user in group.users %} @@ -1016,6 +1349,7 @@ LIST_GROUPS_FOR_USER_TEMPLATE = """ {{ group.name }} {{ group.id }} {{ group.arn }} + {{ group.created_iso_8601 }} {% endfor %} @@ -1146,8 +1480,8 @@ LIST_USER_POLICIES_TEMPLATE = """ {{ policy }} {% endfor %} + false - false 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE @@ -1160,6 +1494,7 @@ CREATE_ACCESS_KEY_TEMPLATE = """ {{ key.access_key_id }} {{ key.status }} {{ key.secret_access_key }} + {{ key.created_iso_8601 }} @@ -1176,7 +1511,7 @@ LIST_ACCESS_KEYS_TEMPLATE = """ {{ user_name }} {{ key.access_key_id }} {{ key.status }} - {{ key.create_date }} + {{ key.created_iso_8601 }} {% endfor %} @@ -1187,11 +1522,23 @@ LIST_ACCESS_KEYS_TEMPLATE = """ """ + +GET_ACCESS_KEY_LAST_USED_TEMPLATE = """ + + + {{ user_name }} + + {{ last_used }} + + + +""" + CREDENTIAL_REPORT_GENERATING = """ - STARTED - No report exists. Starting a new report generation task + STARTED + No report exists. Starting a new report generation task fa788a82-aa8a-11e4-a278-1786c418872b" @@ -1200,7 +1547,7 @@ CREDENTIAL_REPORT_GENERATING = """ CREDENTIAL_REPORT_GENERATED = """ - COMPLETE + COMPLETE fa788a82-aa8a-11e4-a278-1786c418872b" @@ -1209,7 +1556,7 @@ CREDENTIAL_REPORT_GENERATED = """ CREDENTIAL_REPORT = """ - {{ report }} + {{ report }} 2015-02-02T20:02:02Z text/csv @@ -1224,23 +1571,23 @@ LIST_INSTANCE_PROFILES_FOR_ROLE_TEMPLATE = """ {% for profile in instance_profiles %} - {{ profile.id }} - - {% for role in profile.roles %} - - {{ role.path }} - {{ role.arn }} - {{ role.name }} - {{ role.assume_policy_document }} - 2012-05-09T15:45:35Z - {{ role.id }} - - {% endfor %} - - {{ profile.name }} - {{ profile.path }} - {{ profile.arn }} - 2012-05-09T16:27:11Z + {{ profile.id }} + + {% for role in profile.roles %} + + {{ role.path }} + {{ role.arn }} + {{ role.name }} + {{ role.assume_policy_document }} + {{ role.created_iso_8601 }} + {{ role.id }} + + {% endfor %} + + {{ profile.name }} + {{ profile.path }} + {{ profile.arn }} + {{ profile.created_iso_8601 }} {% endfor %} @@ -1295,3 +1642,305 @@ DELETE_ACCOUNT_ALIAS_TEMPLATE = """ + + + {% for group in groups %} + + {{ group.path }} + {{ group.name }} + {{ group.id }} + {{ group.arn }} + {{ group.created_iso_8601 }} + + {% endfor %} + + false + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + + +GET_ACCOUNT_AUTHORIZATION_DETAILS_TEMPLATE = """ + + false + + {% for user in users %} + + + {% for group in get_groups_for_user(user.name) %} + {{ group.name }} + {% endfor %} + + + {% for policy in user.managed_policies %} + + {{ user.managed_policies[policy].name }} + {{ policy }} + + {% endfor %} + + {{ user.id }} + {{ user.path }} + {{ user.name }} + {{ user.arn }} + {{ user.created_iso_8601 }} + + {% endfor %} + + + {% for group in groups %} + + {{ group.id }} + + {% for policy_arn in group.managed_policies %} + + {{ group.managed_policies[policy_arn].name }} + {{ policy_arn }} + + {% endfor %} + + {{ group.name }} + {{ group.path }} + {{ group.arn }} + {{ group.created_iso_8601 }} + + {% for policy in group.policies %} + + {{ policy }} + {{ group.get_policy(policy) }} + + {% endfor %} + + + {% endfor %} + + + {% for role in roles %} + + + {% for inline_policy in role.policies %} + + {{ inline_policy }} + {{ 
role.policies[inline_policy] }} + + {% endfor %} + + + {% for policy_arn in role.managed_policies %} + + {{ role.managed_policies[policy_arn].name }} + {{ policy_arn }} + + {% endfor %} + + + {% for tag in role.get_tags() %} + + {{ tag['Key'] }} + {{ tag['Value'] }} + + {% endfor %} + + + {% for profile in instance_profiles %} + + {{ profile.id }} + + {% for role in profile.roles %} + + {{ role.path }} + {{ role.arn }} + {{ role.name }} + {{ role.assume_role_policy_document }} + {{ role.created_iso_8601 }} + {{ role.id }} + + {% endfor %} + + {{ profile.name }} + {{ profile.path }} + {{ profile.arn }} + {{ profile.created_iso_8601 }} + + {% endfor %} + + {{ role.path }} + {{ role.arn }} + {{ role.name }} + {{ role.assume_role_policy_document }} + {{ role.created_iso_8601 }} + {{ role.id }} + + {% endfor %} + + + {% for policy in policies %} + + {{ policy.name }} + {{ policy.default_version_id }} + {{ policy.id }} + {{ policy.path }} + + {% for policy_version in policy.versions %} + + {{ policy_version.document }} + {{ policy_version.is_default | lower }} + {{ policy_version.version_id }} + {{ policy_version.created_iso_8601 }} + + {% endfor %} + + {{ policy.arn }} + 1 + {{ policy.created_iso_8601 }} + true + {{ policy.updated_iso_8601 }} + + {% endfor %} + + + + 92e79ae7-7399-11e4-8c85-4b53eEXAMPLE + +""" + +CREATE_SAML_PROVIDER_TEMPLATE = """ + + {{ saml_provider.arn }} + + + 29f47818-99f5-11e1-a4c3-27EXAMPLE804 + +""" + +LIST_SAML_PROVIDERS_TEMPLATE = """ + + + {% for saml_provider in saml_providers %} + + {{ saml_provider.arn }} + 2032-05-09T16:27:11Z + 2012-05-09T16:27:03Z + + {% endfor %} + + + + fd74fa8d-99f3-11e1-a4c3-27EXAMPLE804 + +""" + +GET_SAML_PROVIDER_TEMPLATE = """ + + 2012-05-09T16:27:11Z + 2015-12-31T21:59:59Z + {{ saml_provider.saml_metadata_document }} + + + 29f47818-99f5-11e1-a4c3-27EXAMPLE804 + +""" + +DELETE_SAML_PROVIDER_TEMPLATE = """ + + c749ee7f-99ef-11e1-a4c3-27EXAMPLE804 + +""" + +UPDATE_SAML_PROVIDER_TEMPLATE = """ + + {{ saml_provider.arn }} + + + 29f47818-99f5-11e1-a4c3-27EXAMPLE804 + +""" + +UPLOAD_SIGNING_CERTIFICATE_TEMPLATE = """ + + + {{ cert.user_name }} + {{ cert.id }} + {{ cert.body }} + {{ cert.status }} + + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + + +UPDATE_SIGNING_CERTIFICATE_TEMPLATE = """ + + EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE + +""" + + +DELETE_SIGNING_CERTIFICATE_TEMPLATE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + + +LIST_SIGNING_CERTIFICATES_TEMPLATE = """ + + {{ user_name }} + + {% for cert in certificates %} + + {{ user_name }} + {{ cert.id }} + {{ cert.body }} + {{ cert.status }} + + {% endfor %} + + false + + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + +""" + + +TAG_ROLE_TEMPLATE = """ + + EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE + +""" + + +LIST_ROLE_TAG_TEMPLATE = """ + + {{ 'true' if marker else 'false' }} + {% if marker %} + {{ marker }} + {% endif %} + + {% for tag in tags %} + + {{ tag['Key'] }} + {{ tag['Value'] }} + + {% endfor %} + + + + EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE + +""" + + +UNTAG_ROLE_TEMPLATE = """ + + EXAMPLE8-90ab-cdef-fedc-ba987EXAMPLE + +""" diff --git a/moto/iam/utils.py b/moto/iam/utils.py index 1fae85a6c..2bd6448f9 100644 --- a/moto/iam/utils.py +++ b/moto/iam/utils.py @@ -7,13 +7,12 @@ import six def random_alphanumeric(length): return ''.join(six.text_type( random.choice( - string.ascii_letters + string.digits + string.ascii_letters + string.digits + "+" + "/" )) for _ in range(length) ) -def random_resource_id(): - size = 20 +def random_resource_id(size=20): chars = list(range(10)) + 
list(string.ascii_lowercase)
    return ''.join(six.text_type(random.choice(chars)) for x in range(size))
diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py
index 4bb01c095..3af3751d9 100644
--- a/moto/iot/exceptions.py
+++ b/moto/iot/exceptions.py
@@ -16,9 +16,35 @@ class ResourceNotFoundException(IoTClientError):
 
 
 class InvalidRequestException(IoTClientError):
-    def __init__(self):
+    def __init__(self, msg=None):
         self.code = 400
         super(InvalidRequestException, self).__init__(
             "InvalidRequestException",
-            "The request is not valid."
+            msg or "The request is not valid."
         )
+
+
+class VersionConflictException(IoTClientError):
+    def __init__(self, name):
+        self.code = 409
+        super(VersionConflictException, self).__init__(
+            'VersionConflictException',
+            'The version for thing %s does not match the expected version.' % name
+        )
+
+
+class CertificateStateException(IoTClientError):
+    def __init__(self, msg, cert_id):
+        self.code = 406
+        super(CertificateStateException, self).__init__(
+            'CertificateStateException',
+            '%s Id: %s' % (msg, cert_id)
+        )
+
+
+class DeleteConflictException(IoTClientError):
+    def __init__(self, msg):
+        self.code = 409
+        super(DeleteConflictException, self).__init__(
+            'DeleteConflictException', msg
+        )
diff --git a/moto/iot/models.py b/moto/iot/models.py
index 1efa6690e..960787101 100644
--- a/moto/iot/models.py
+++ b/moto/iot/models.py
@@ -1,15 +1,23 @@
 from __future__ import unicode_literals
-import time
-import boto3
-import string
-import random
+
 import hashlib
+import random
+import re
+import string
+import time
 import uuid
-from moto.core import BaseBackend, BaseModel
 from collections import OrderedDict
+from datetime import datetime
+
+import boto3
+
+from moto.core import BaseBackend, BaseModel
 
 from .exceptions import (
+    CertificateStateException,
+    DeleteConflictException,
     ResourceNotFoundException,
-    InvalidRequestException
+    InvalidRequestException,
+    VersionConflictException
 )
@@ -29,6 +37,7 @@ class FakeThing(BaseModel):
     def to_dict(self, include_default_client_id=False):
         obj = {
             'thingName': self.thing_name,
+            'thingArn': self.arn,
             'attributes': self.attributes,
             'version': self.version
         }
@@ -44,6 +53,7 @@ class FakeThingType(BaseModel):
         self.region_name = region_name
         self.thing_type_name = thing_type_name
         self.thing_type_properties = thing_type_properties
+        self.thing_type_id = str(uuid.uuid4())  # the ID format is undocumented, so use a random UUID
         t = time.time()
         self.metadata = {
             'deprecated': False,
@@ -54,13 +64,39 @@ class FakeThingType(BaseModel):
     def to_dict(self):
         return {
             'thingTypeName': self.thing_type_name,
+            'thingTypeId': self.thing_type_id,
             'thingTypeProperties': self.thing_type_properties,
             'thingTypeMetadata': self.metadata
         }
 
 
+class FakeThingGroup(BaseModel):
+    def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name):
+        self.region_name = region_name
+        self.thing_group_name = thing_group_name
+        self.thing_group_id = str(uuid.uuid4())  # the ID format is undocumented, so use a random UUID
+        self.version = 1  # starts at 1; incremented by update_thing_group
+        self.parent_group_name = parent_group_name
+        self.thing_group_properties = thing_group_properties or {}
+        t = time.time()
+        self.metadata = {
+            'creationDate': int(t * 1000) / 1000.0
+        }
+        self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name)
+        self.things = OrderedDict()
+
+    def to_dict(self):
+        return {
+            'thingGroupName': self.thing_group_name,
+            'thingGroupId': self.thing_group_id,
+            'version': self.version,
+            'thingGroupProperties': self.thing_group_properties,
+            'thingGroupMetadata': self.metadata
+        }
+
+
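FakeThingGroup only models the fields that the thing-group endpoints added later in this diff read back. For orientation, this is roughly how those endpoints behave end to end; a minimal sketch, not part of the diff, assuming moto's @mock_iot decorator (the group and thing names are illustrative):

import boto3
from moto import mock_iot

@mock_iot
def thing_group_round_trip():
    client = boto3.client('iot', region_name='ap-northeast-1')
    client.create_thing_group(thingGroupName='my-group')
    client.create_thing(thingName='my-thing')
    client.add_thing_to_thing_group(thingGroupName='my-group', thingName='my-thing')
    # list_things_in_thing_group returns the member thing names
    assert client.list_things_in_thing_group(thingGroupName='my-group')['things'] == ['my-thing']
    # describe_thing_group returns the dict built by FakeThingGroup.to_dict()
    assert client.describe_thing_group(thingGroupName='my-group')['version'] == 1
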
 class FakeCertificate(BaseModel):
-    def __init__(self, certificate_pem, status, region_name):
+    def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None):
         m = hashlib.sha256()
         m.update(str(uuid.uuid4()).encode('utf-8'))
         self.certificate_id = m.hexdigest()
@@ -73,12 +109,18 @@ class FakeCertificate(BaseModel):
         self.transfer_data = {}
         self.creation_date = time.time()
         self.last_modified_date = self.creation_date
+        self.ca_certificate_id = None
+        self.ca_certificate_pem = ca_certificate_pem
+        if ca_certificate_pem:
+            m.update(str(uuid.uuid4()).encode('utf-8'))
+            self.ca_certificate_id = m.hexdigest()
 
     def to_dict(self):
         return {
             'certificateArn': self.arn,
             'certificateId': self.certificate_id,
+            'caCertificateId': self.ca_certificate_id,
             'status': self.status,
             'creationDate': self.creation_date
         }
@@ -131,12 +173,78 @@ class FakePolicy(BaseModel):
         }
 
 
+class FakeJob(BaseModel):
+    JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]"
+    JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN)
+
+    def __init__(self, job_id, targets, document_source, document, description, presigned_url_config, target_selection,
+                 job_executions_rollout_config, document_parameters, region_name):
+        if not self._job_id_matcher(self.JOB_ID_REGEX, job_id):
+            raise InvalidRequestException()
+
+        self.region_name = region_name
+        self.job_id = job_id
+        self.job_arn = 'arn:aws:iot:%s:1:job/%s' % (self.region_name, job_id)
+        self.targets = targets
+        self.document_source = document_source
+        self.document = document
+        self.description = description
+        self.presigned_url_config = presigned_url_config
+        self.target_selection = target_selection
+        self.job_executions_rollout_config = job_executions_rollout_config
+        self.status = None  # IN_PROGRESS | CANCELED | COMPLETED
+        self.comment = None
+        self.created_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.completed_at = None
+        self.job_process_details = {
+            'processingTargets': targets,
+            'numberOfQueuedThings': 1,
+            'numberOfCanceledThings': 0,
+            'numberOfSucceededThings': 0,
+            'numberOfFailedThings': 0,
+            'numberOfRejectedThings': 0,
+            'numberOfInProgressThings': 0,
+            'numberOfRemovedThings': 0
+        }
+        self.document_parameters = document_parameters
+
+    def to_dict(self):
+        obj = {
+            'jobArn': self.job_arn,
+            'jobId': self.job_id,
+            'targets': self.targets,
+            'description': self.description,
+            'presignedUrlConfig': self.presigned_url_config,
+            'targetSelection': self.target_selection,
+            'jobExecutionsRolloutConfig': self.job_executions_rollout_config,
+            'status': self.status,
+            'comment': self.comment,
+            'createdAt': self.created_at,
+            'lastUpdatedAt': self.last_updated_at,
+            'completedAt': self.completed_at,
+            'jobProcessDetails': self.job_process_details,
+            'documentParameters': self.document_parameters,
+            'document': self.document,
+            'documentSource': self.document_source
+        }
+
+        return obj
+
+    def _job_id_matcher(self, regex, argument):
+        regex_match = regex.match(argument)
+        length_match = len(argument) <= 64
+        return regex_match and length_match
+
+
 class IoTBackend(BaseBackend):
     def __init__(self, region_name=None):
         super(IoTBackend, self).__init__()
         self.region_name = region_name
         self.things = OrderedDict()
+        self.jobs = OrderedDict()
         self.thing_types = OrderedDict()
+        self.thing_groups = OrderedDict()
         self.certificates = OrderedDict()
         self.policies = OrderedDict()
         self.principal_policies = OrderedDict()
@@ -174,15 +282,37 @@ class IoTBackend(BaseBackend):
     def list_thing_types(self, thing_type_name=None):
         if thing_type_name:
-            # It's wierd but thing_type_name is filterd by forward match, not complete match
+            # It's weird but thing_type_name is filtered by forward match, not complete match
             return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)]
-        thing_types = self.thing_types.values()
-        return thing_types
+        return self.thing_types.values()
 
-    def list_things(self, attribute_name, attribute_value, thing_type_name):
-        # TODO: filter by attributess or thing_type
-        things = self.things.values()
-        return things
+    def list_things(self, attribute_name, attribute_value, thing_type_name, max_results, token):
+        all_things = [_.to_dict() for _ in self.things.values()]
+        if attribute_name is not None and thing_type_name is not None:
+            filtered_things = list(filter(lambda elem:
+                                          attribute_name in elem["attributes"] and
+                                          elem["attributes"][attribute_name] == attribute_value and
+                                          "thingTypeName" in elem and
+                                          elem["thingTypeName"] == thing_type_name, all_things))
+        elif attribute_name is not None and thing_type_name is None:
+            filtered_things = list(filter(lambda elem:
+                                          attribute_name in elem["attributes"] and
+                                          elem["attributes"][attribute_name] == attribute_value, all_things))
+        elif attribute_name is None and thing_type_name is not None:
+            filtered_things = list(
+                filter(lambda elem: "thingTypeName" in elem and elem["thingTypeName"] == thing_type_name, all_things))
+        else:
+            filtered_things = all_things
+
+        if token is None:
+            things = filtered_things[0:max_results]
+            next_token = str(max_results) if len(filtered_things) > max_results else None
+        else:
+            token = int(token)
+            things = filtered_things[token:token + max_results]
+            next_token = str(token + max_results) if len(filtered_things) > token + max_results else None
+
+        return things, next_token
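The nextToken returned by list_things is simply the stringified integer offset of the next page, not an opaque cursor, so paging looks like this; a sketch, not part of the diff, assuming @mock_iot and 60 registered things:

import boto3
from moto import mock_iot

@mock_iot
def page_through_things():
    client = boto3.client('iot', region_name='ap-northeast-1')
    for i in range(60):
        client.create_thing(thingName='thing-%02d' % i)
    page = client.list_things(maxResults=50)
    assert len(page['things']) == 50
    assert page['nextToken'] == '50'  # offset where the next page starts
    page = client.list_things(maxResults=50, nextToken=page['nextToken'])
    assert len(page['things']) == 10
    assert page.get('nextToken') is None  # no further pages
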
 
     def describe_thing(self, thing_name):
         things = [_ for _ in self.things.values() if _.thing_name == thing_name]
@@ -256,7 +386,25 @@ class IoTBackend(BaseBackend):
         return certificate, key_pair
 
     def delete_certificate(self, certificate_id):
-        self.describe_certificate(certificate_id)
+        cert = self.describe_certificate(certificate_id)
+        if cert.status == 'ACTIVE':
+            raise CertificateStateException(
+                'Certificate must be deactivated (not ACTIVE) before deletion.', certificate_id)
+
+        certs = [k[0] for k, v in self.principal_things.items()
+                 if self._get_principal(k[0]).certificate_id == certificate_id]
+        if len(certs) > 0:
+            raise DeleteConflictException(
+                'Things must be detached before deletion (arn: %s)' % certs[0]
+            )
+
+        certs = [k[0] for k, v in self.principal_policies.items()
+                 if self._get_principal(k[0]).certificate_id == certificate_id]
+        if len(certs) > 0:
+            raise DeleteConflictException(
+                'Certificate policies must be detached before deletion (arn: %s)' % certs[0]
+            )
+
         del self.certificates[certificate_id]
 
     def describe_certificate(self, certificate_id):
@@ -268,6 +416,12 @@ class IoTBackend(BaseBackend):
     def list_certificates(self):
         return self.certificates.values()
 
+    def register_certificate(self, certificate_pem, ca_certificate_pem, set_as_active, status):
+        certificate = FakeCertificate(certificate_pem, 'ACTIVE' if set_as_active else status,
+                                      self.region_name, ca_certificate_pem)
+        self.certificates[certificate.certificate_id] = certificate
+        return certificate
+
     def update_certificate(self, certificate_id, new_status):
         cert = self.describe_certificate(certificate_id)
         # TODO: validate new_status
@@ -289,6 +443,14 @@ class IoTBackend(BaseBackend):
         return policies[0]
 
     def delete_policy(self, policy_name):
+
+        policies = [k[1] for k, v in self.principal_policies.items() if k[1] == policy_name]
+        if len(policies) > 0:
+            raise DeleteConflictException(
+                'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)'
+                % policy_name
+            )
+
         policy = self.get_policy(policy_name)
         del self.policies[policy.name]
@@ -307,6 +469,14 @@ class IoTBackend(BaseBackend):
             pass
         raise ResourceNotFoundException()
 
+    def attach_policy(self, policy_name, target):
+        principal = self._get_principal(target)
+        policy = self.get_policy(policy_name)
+        k = (target, policy_name)
+        if k in self.principal_policies:
+            return
+        self.principal_policies[k] = (principal, policy)
+
     def attach_principal_policy(self, policy_name, principal_arn):
         principal = self._get_principal(principal_arn)
         policy = self.get_policy(policy_name)
@@ -315,6 +485,15 @@ class IoTBackend(BaseBackend):
             return
         self.principal_policies[k] = (principal, policy)
 
+    def detach_policy(self, policy_name, target):
+        # this may raise ResourceNotFoundException
+        self._get_principal(target)
+        self.get_policy(policy_name)
+        k = (target, policy_name)
+        if k not in self.principal_policies:
+            raise ResourceNotFoundException()
+        del self.principal_policies[k]
+
     def detach_principal_policy(self, policy_name, principal_arn):
         # this may raise ResourceNotFoundException
         self._get_principal(principal_arn)
@@ -359,6 +538,135 @@ class IoTBackend(BaseBackend):
         principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name]
         return principals
 
+    def describe_thing_group(self, thing_group_name):
+        thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name]
+        if len(thing_groups) == 0:
+            raise ResourceNotFoundException()
+        return thing_groups[0]
+
+    def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties):
+        thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name)
+        self.thing_groups[thing_group.arn] = thing_group
+        return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id
+
+    def delete_thing_group(self, thing_group_name, expected_version):
+        thing_group = self.describe_thing_group(thing_group_name)
+        del self.thing_groups[thing_group.arn]
+
+    def list_thing_groups(self, parent_group, name_prefix_filter, recursive):
+        thing_groups = self.thing_groups.values()
+        return thing_groups
+
+    def update_thing_group(self, thing_group_name, thing_group_properties, expected_version):
+        thing_group = self.describe_thing_group(thing_group_name)
+        if expected_version and expected_version != thing_group.version:
+            raise VersionConflictException(thing_group_name)
+        attribute_payload = thing_group_properties.get('attributePayload', None)
+        if attribute_payload is not None and 'attributes' in attribute_payload:
+            do_merge = attribute_payload.get('merge', False)
+            attributes = attribute_payload['attributes']
+            if not do_merge:
+                thing_group.thing_group_properties['attributePayload']['attributes'] = attributes
+            else:
+                thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes)
+        elif attribute_payload is not None and 'attributes' not in attribute_payload:
+            thing_group.attributes = {}
+        thing_group.version = thing_group.version + 1
+        return thing_group.version
+
+    def _identify_thing_group(self, thing_group_name, thing_group_arn):
+        # identify thing group
+        if thing_group_name is None and thing_group_arn is None:
+            raise
InvalidRequestException( + ' Both thingGroupArn and thingGroupName are empty. Need to specify at least one of them' + ) + if thing_group_name is not None: + thing_group = self.describe_thing_group(thing_group_name) + if thing_group_arn and thing_group.arn != thing_group_arn: + raise InvalidRequestException( + 'ThingGroupName thingGroupArn does not match specified thingGroupName in request' + ) + elif thing_group_arn is not None: + if thing_group_arn not in self.thing_groups: + raise InvalidRequestException() + thing_group = self.thing_groups[thing_group_arn] + return thing_group + + def _identify_thing(self, thing_name, thing_arn): + # identify thing + if thing_name is None and thing_arn is None: + raise InvalidRequestException( + 'Both thingArn and thingName are empty. Need to specify at least one of them' + ) + if thing_name is not None: + thing = self.describe_thing(thing_name) + if thing_arn and thing.arn != thing_arn: + raise InvalidRequestException( + 'ThingName thingArn does not match specified thingName in request' + ) + elif thing_arn is not None: + if thing_arn not in self.things: + raise InvalidRequestException() + thing = self.things[thing_arn] + return thing + + def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): + thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) + thing = self._identify_thing(thing_name, thing_arn) + if thing.arn in thing_group.things: + # aws ignores duplicate registration + return + thing_group.things[thing.arn] = thing + + def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): + thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) + thing = self._identify_thing(thing_name, thing_arn) + if thing.arn not in thing_group.things: + # aws ignores non-registered thing + return + del thing_group.things[thing.arn] + + def list_things_in_thing_group(self, thing_group_name, recursive): + thing_group = self.describe_thing_group(thing_group_name) + return thing_group.things.values() + + def list_thing_groups_for_thing(self, thing_name): + thing = self.describe_thing(thing_name) + all_thing_groups = self.list_thing_groups(None, None, None) + ret = [] + for thing_group in all_thing_groups: + if thing.arn in thing_group.things: + ret.append({ + 'groupName': thing_group.thing_group_name, + 'groupArn': thing_group.arn + }) + return ret + + def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove): + thing = self.describe_thing(thing_name) + for thing_group_name in thing_groups_to_add: + thing_group = self.describe_thing_group(thing_group_name) + self.add_thing_to_thing_group( + thing_group.thing_group_name, None, + thing.thing_name, None + ) + for thing_group_name in thing_groups_to_remove: + thing_group = self.describe_thing_group(thing_group_name) + self.remove_thing_from_thing_group( + thing_group.thing_group_name, None, + thing.thing_name, None + ) + + def create_job(self, job_id, targets, document_source, document, description, presigned_url_config, + target_selection, job_executions_rollout_config, document_parameters): + job = FakeJob(job_id, targets, document_source, document, description, presigned_url_config, target_selection, + job_executions_rollout_config, document_parameters, self.region_name) + self.jobs[job_id] = job + return job.job_arn, job_id, description + + def describe_job(self, job_id): + return self.jobs[job_id] + available_regions = 
boto3.session.Session().get_available_regions("iot") iot_backends = {region: IoTBackend(region) for region in available_regions} diff --git a/moto/iot/responses.py b/moto/iot/responses.py index bbe2bb016..3821c1c79 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -1,7 +1,9 @@ from __future__ import unicode_literals + +import json + from moto.core.responses import BaseResponse from .models import iot_backends -import json class IoTResponse(BaseResponse): @@ -32,38 +34,45 @@ class IoTResponse(BaseResponse): return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) def list_thing_types(self): - # previous_next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") + previous_next_token = self._get_param("nextToken") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier thing_type_name = self._get_param("thingTypeName") thing_types = self.iot_backend.list_thing_types( thing_type_name=thing_type_name ) - # TODO: support next_token and max_results - next_token = None - return json.dumps(dict(thingTypes=[_.to_dict() for _ in thing_types], nextToken=next_token)) + thing_types = [_.to_dict() for _ in thing_types] + if previous_next_token is None: + result = thing_types[0:max_results] + next_token = str(max_results) if len(thing_types) > max_results else None + else: + token = int(previous_next_token) + result = thing_types[token:token + max_results] + next_token = str(token + max_results) if len(thing_types) > token + max_results else None + + return json.dumps(dict(thingTypes=result, nextToken=next_token)) def list_things(self): - # previous_next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") + previous_next_token = self._get_param("nextToken") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier attribute_name = self._get_param("attributeName") attribute_value = self._get_param("attributeValue") thing_type_name = self._get_param("thingTypeName") - things = self.iot_backend.list_things( + things, next_token = self.iot_backend.list_things( attribute_name=attribute_name, attribute_value=attribute_value, thing_type_name=thing_type_name, + max_results=max_results, + token=previous_next_token ) - # TODO: support next_token and max_results - next_token = None - return json.dumps(dict(things=[_.to_dict() for _ in things], nextToken=next_token)) + + return json.dumps(dict(things=things, nextToken=next_token)) def describe_thing(self): thing_name = self._get_param("thingName") thing = self.iot_backend.describe_thing( thing_name=thing_name, ) - print(thing.to_dict(include_default_client_id=True)) return json.dumps(thing.to_dict(include_default_client_id=True)) def describe_thing_type(self): @@ -104,8 +113,44 @@ class IoTResponse(BaseResponse): ) return json.dumps(dict()) + def create_job(self): + job_arn, job_id, description = self.iot_backend.create_job( + job_id=self._get_param("jobId"), + targets=self._get_param("targets"), + description=self._get_param("description"), + document_source=self._get_param("documentSource"), + document=self._get_param("document"), + presigned_url_config=self._get_param("presignedUrlConfig"), + target_selection=self._get_param("targetSelection"), + job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), + document_parameters=self._get_param("documentParameters") + ) + + return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) + + 
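The create_job response above pairs with describe_job below; driven through boto3 the round trip looks roughly like this (a sketch, not part of the diff; the job id and document are illustrative, and @mock_iot is assumed):

import json

import boto3
from moto import mock_iot

@mock_iot
def create_then_describe_job():
    client = boto3.client('iot', region_name='eu-west-1')
    thing = client.create_thing(thingName='device-1')
    created = client.create_job(
        jobId='example-job',
        targets=[thing['thingArn']],
        document=json.dumps({'action': 'reboot'}),
        description='example job',
    )
    job = client.describe_job(jobId='example-job')['job']
    assert job['jobArn'] == created['jobArn']
    assert job['targets'] == [thing['thingArn']]
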
def describe_job(self): + job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) + return json.dumps(dict( + documentSource=job.document_source, + job=dict( + comment=job.comment, + completedAt=job.completed_at, + createdAt=job.created_at, + description=job.description, + documentParameters=job.document_parameters, + jobArn=job.job_arn, + jobExecutionsRolloutConfig=job.job_executions_rollout_config, + jobId=job.job_id, + jobProcessDetails=job.job_process_details, + lastUpdatedAt=job.last_updated_at, + presignedUrlConfig=job.presigned_url_config, + status=job.status, + targets=job.targets, + targetSelection=job.target_selection + ))) + def create_keys_and_certificate(self): - set_as_active = self._get_param("setAsActive") + set_as_active = self._get_bool_param("setAsActive") cert, key_pair = self.iot_backend.create_keys_and_certificate( set_as_active=set_as_active, ) @@ -135,9 +180,23 @@ class IoTResponse(BaseResponse): # marker = self._get_param("marker") # ascending_order = self._get_param("ascendingOrder") certificates = self.iot_backend.list_certificates() - # TODO: handle pagination + # TODO: implement pagination in the future return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) + def register_certificate(self): + certificate_pem = self._get_param("certificatePem") + ca_certificate_pem = self._get_param("caCertificatePem") + set_as_active = self._get_bool_param("setAsActive") + status = self._get_param("status") + + cert = self.iot_backend.register_certificate( + certificate_pem=certificate_pem, + ca_certificate_pem=ca_certificate_pem, + set_as_active=set_as_active, + status=status + ) + return json.dumps(dict(certificateId=cert.certificate_id, certificateArn=cert.arn)) + def update_certificate(self): certificate_id = self._get_param("certificateId") new_status = self._get_param("newStatus") @@ -162,7 +221,7 @@ class IoTResponse(BaseResponse): # ascending_order = self._get_param("ascendingOrder") policies = self.iot_backend.list_policies() - # TODO: handle pagination + # TODO: implement pagination in the future return json.dumps(dict(policies=[_.to_dict() for _ in policies])) def get_policy(self): @@ -179,6 +238,15 @@ class IoTResponse(BaseResponse): ) return json.dumps(dict()) + def attach_policy(self): + policy_name = self._get_param("policyName") + target = self._get_param('target') + self.iot_backend.attach_policy( + policy_name=policy_name, + target=target, + ) + return json.dumps(dict()) + def attach_principal_policy(self): policy_name = self._get_param("policyName") principal = self.headers.get('x-amzn-iot-principal') @@ -188,6 +256,15 @@ class IoTResponse(BaseResponse): ) return json.dumps(dict()) + def detach_policy(self): + policy_name = self._get_param("policyName") + target = self._get_param('target') + self.iot_backend.detach_policy( + policy_name=policy_name, + target=target, + ) + return json.dumps(dict()) + def detach_principal_policy(self): policy_name = self._get_param("policyName") principal = self.headers.get('x-amzn-iot-principal') @@ -205,7 +282,7 @@ class IoTResponse(BaseResponse): policies = self.iot_backend.list_principal_policies( principal_arn=principal ) - # TODO: handle pagination + # TODO: implement pagination in the future next_marker = None return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) @@ -217,7 +294,7 @@ class IoTResponse(BaseResponse): principals = self.iot_backend.list_policy_principals( policy_name=policy_name, ) - # TODO: handle pagination + # TODO: implement pagination 
in the future next_marker = None return json.dumps(dict(principals=principals, nextMarker=next_marker)) @@ -246,7 +323,7 @@ class IoTResponse(BaseResponse): things = self.iot_backend.list_principal_things( principal_arn=principal, ) - # TODO: handle pagination + # TODO: implement pagination in the future next_token = None return json.dumps(dict(things=things, nextToken=next_token)) @@ -256,3 +333,123 @@ class IoTResponse(BaseResponse): thing_name=thing_name, ) return json.dumps(dict(principals=principals)) + + def describe_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group = self.iot_backend.describe_thing_group( + thing_group_name=thing_group_name, + ) + return json.dumps(thing_group.to_dict()) + + def create_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + parent_group_name = self._get_param("parentGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( + thing_group_name=thing_group_name, + parent_group_name=parent_group_name, + thing_group_properties=thing_group_properties, + ) + return json.dumps(dict( + thingGroupName=thing_group_name, + thingGroupArn=thing_group_arn, + thingGroupId=thing_group_id) + ) + + def delete_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing_group( + thing_group_name=thing_group_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def list_thing_groups(self): + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + parent_group = self._get_param("parentGroup") + name_prefix_filter = self._get_param("namePrefixFilter") + recursive = self._get_param("recursive") + thing_groups = self.iot_backend.list_thing_groups( + parent_group=parent_group, + name_prefix_filter=name_prefix_filter, + recursive=recursive, + ) + next_token = None + rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=rets, nextToken=next_token)) + + def update_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + expected_version = self._get_param("expectedVersion") + version = self.iot_backend.update_thing_group( + thing_group_name=thing_group_name, + thing_group_properties=thing_group_properties, + expected_version=expected_version, + ) + return json.dumps(dict(version=version)) + + def add_thing_to_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.add_thing_to_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def remove_thing_from_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.remove_thing_from_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def 
list_things_in_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + recursive = self._get_param("recursive") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + things = self.iot_backend.list_things_in_thing_group( + thing_group_name=thing_group_name, + recursive=recursive, + ) + next_token = None + thing_names = [_.thing_name for _ in things] + # TODO: implement pagination in the future + return json.dumps(dict(things=thing_names, nextToken=next_token)) + + def list_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + thing_groups = self.iot_backend.list_thing_groups_for_thing( + thing_name=thing_name + ) + next_token = None + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) + + def update_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] + thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] + self.iot_backend.update_thing_groups_for_thing( + thing_name=thing_name, + thing_groups_to_add=thing_groups_to_add, + thing_groups_to_remove=thing_groups_to_remove, + ) + return json.dumps(dict()) diff --git a/moto/iotdata/models.py b/moto/iotdata/models.py index 7ae517109..ad4caa89e 100644 --- a/moto/iotdata/models.py +++ b/moto/iotdata/models.py @@ -184,6 +184,10 @@ class IoTDataPlaneBackend(BaseBackend): thing.thing_shadow = new_shadow return thing.thing_shadow + def publish(self, topic, qos, payload): + # do nothing because client won't know about the result + return None + available_regions = boto3.session.Session().get_available_regions("iot-data") iotdata_backends = {region: IoTDataPlaneBackend(region) for region in available_regions} diff --git a/moto/iotdata/responses.py b/moto/iotdata/responses.py index d87479011..8ab724ed1 100644 --- a/moto/iotdata/responses.py +++ b/moto/iotdata/responses.py @@ -33,3 +33,14 @@ class IoTDataPlaneResponse(BaseResponse): thing_name=thing_name, ) return json.dumps(payload.to_dict()) + + def publish(self): + topic = self._get_param("topic") + qos = self._get_int_param("qos") + payload = self._get_param("payload") + self.iotdata_backend.publish( + topic=topic, + qos=qos, + payload=payload, + ) + return json.dumps(dict()) diff --git a/moto/kinesis/exceptions.py b/moto/kinesis/exceptions.py index e2fe02775..82f796ecc 100644 --- a/moto/kinesis/exceptions.py +++ b/moto/kinesis/exceptions.py @@ -17,7 +17,7 @@ class ResourceNotFoundError(BadRequest): class ResourceInUseError(BadRequest): def __init__(self, message): - super(ResourceNotFoundError, self).__init__() + super(ResourceInUseError, self).__init__() self.description = json.dumps({ "message": message, '__type': 'ResourceInUseException', diff --git a/moto/kinesis/models.py b/moto/kinesis/models.py index aae94bbbd..e7a389981 100644 --- a/moto/kinesis/models.py +++ b/moto/kinesis/models.py @@ -12,24 +12,27 @@ from hashlib import md5 from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel +from moto.core.utils import unix_time from .exceptions import StreamNotFoundError, ShardNotFoundError, ResourceInUseError, \ ResourceNotFoundError, InvalidArgumentError from .utils import compose_shard_iterator, compose_new_shard_iterator, decompose_shard_iterator class Record(BaseModel): - def __init__(self, partition_key, data, 
sequence_number, explicit_hash_key): self.partition_key = partition_key self.data = data self.sequence_number = sequence_number self.explicit_hash_key = explicit_hash_key + self.created_at_datetime = datetime.datetime.utcnow() + self.created_at = unix_time(self.created_at_datetime) def to_json(self): return { "Data": self.data, "PartitionKey": self.partition_key, "SequenceNumber": str(self.sequence_number), + "ApproximateArrivalTimestamp": self.created_at_datetime.isoformat() } @@ -48,16 +51,21 @@ class Shard(BaseModel): def get_records(self, last_sequence_id, limit): last_sequence_id = int(last_sequence_id) results = [] + secs_behind_latest = 0 for sequence_number, record in self.records.items(): if sequence_number > last_sequence_id: results.append(record) last_sequence_id = sequence_number + very_last_record = self.records[next(reversed(self.records))] + secs_behind_latest = very_last_record.created_at - record.created_at + if len(results) == limit: break - return results, last_sequence_id + millis_behind_latest = int(secs_behind_latest * 1000) + return results, last_sequence_id, millis_behind_latest def put_record(self, partition_key, data, explicit_hash_key): # Note: this function is not safe for concurrency @@ -80,6 +88,15 @@ class Shard(BaseModel): return list(self.records.keys())[-1] return 0 + def get_sequence_number_at(self, at_timestamp): + if not self.records or at_timestamp < list(self.records.values())[0].created_at: + return 0 + else: + # find the last item in the list that was created before + # at_timestamp + r = next((r for r in reversed(self.records.values()) if r.created_at < at_timestamp), None) + return r.sequence_number + def to_json(self): return { "HashKeyRange": { @@ -99,22 +116,19 @@ class Stream(BaseModel): def __init__(self, stream_name, shard_count, region): self.stream_name = stream_name self.shard_count = shard_count + self.creation_datetime = datetime.datetime.now() self.region = region self.account_number = "123456789012" self.shards = {} self.tags = {} + self.status = "ACTIVE" - if six.PY3: - izip_longest = itertools.zip_longest - else: - izip_longest = itertools.izip_longest + step = 2**128 // shard_count + hash_ranges = itertools.chain(map(lambda i: (i, i * step, (i + 1) * step), + range(shard_count - 1)), + [(shard_count - 1, (shard_count - 1) * step, 2**128)]) + for index, start, end in hash_ranges: - for index, start, end in izip_longest(range(shard_count), - range(0, 2**128, 2 ** - 128 // shard_count), - range(2**128 // shard_count, 2 ** - 128, 2**128 // shard_count), - fillvalue=2**128): shard = Shard(index, start, end) self.shards[shard.shard_id] = shard @@ -166,12 +180,23 @@ class Stream(BaseModel): "StreamDescription": { "StreamARN": self.arn, "StreamName": self.stream_name, - "StreamStatus": "ACTIVE", + "StreamStatus": self.status, "HasMoreShards": False, "Shards": [shard.to_json() for shard in self.shards.values()], } } + def to_json_summary(self): + return { + "StreamDescriptionSummary": { + "StreamARN": self.arn, + "StreamName": self.stream_name, + "StreamStatus": self.status, + "StreamCreationTimestamp": six.text_type(self.creation_datetime), + "OpenShardCount": self.shard_count, + } + } + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -215,7 +240,7 @@ class DeliveryStream(BaseModel): self.records = [] self.status = 'ACTIVE' - self.create_at = datetime.datetime.utcnow() + self.created_at = datetime.datetime.utcnow() self.last_updated = 
datetime.datetime.utcnow() @property @@ -256,7 +281,7 @@ class DeliveryStream(BaseModel): def to_dict(self): return { "DeliveryStreamDescription": { - "CreateTimestamp": time.mktime(self.create_at.timetuple()), + "CreateTimestamp": time.mktime(self.created_at.timetuple()), "DeliveryStreamARN": self.arn, "DeliveryStreamName": self.name, "DeliveryStreamStatus": self.status, @@ -292,6 +317,9 @@ class KinesisBackend(BaseBackend): else: raise StreamNotFoundError(stream_name) + def describe_stream_summary(self, stream_name): + return self.describe_stream(stream_name) + def list_streams(self): return self.streams.values() @@ -300,13 +328,14 @@ class KinesisBackend(BaseBackend): return self.streams.pop(stream_name) raise StreamNotFoundError(stream_name) - def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type, starting_sequence_number): + def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type, starting_sequence_number, + at_timestamp): # Validate params stream = self.describe_stream(stream_name) shard = stream.get_shard(shard_id) shard_iterator = compose_new_shard_iterator( - stream_name, shard, shard_iterator_type, starting_sequence_number + stream_name, shard, shard_iterator_type, starting_sequence_number, at_timestamp ) return shard_iterator @@ -317,12 +346,12 @@ class KinesisBackend(BaseBackend): stream = self.describe_stream(stream_name) shard = stream.get_shard(shard_id) - records, last_sequence_id = shard.get_records(last_sequence_id, limit) + records, last_sequence_id, millis_behind_latest = shard.get_records(last_sequence_id, limit) next_shard_iterator = compose_shard_iterator( stream_name, shard, last_sequence_id) - return next_shard_iterator, records + return next_shard_iterator, records, millis_behind_latest def put_record(self, stream_name, partition_key, explicit_hash_key, sequence_number_for_ordering, data): stream = self.describe_stream(stream_name) diff --git a/moto/kinesis/responses.py b/moto/kinesis/responses.py index 1ac6cd756..3a81bd9f4 100644 --- a/moto/kinesis/responses.py +++ b/moto/kinesis/responses.py @@ -33,6 +33,11 @@ class KinesisResponse(BaseResponse): stream = self.kinesis_backend.describe_stream(stream_name) return json.dumps(stream.to_json()) + def describe_stream_summary(self): + stream_name = self.parameters.get('StreamName') + stream = self.kinesis_backend.describe_stream_summary(stream_name) + return json.dumps(stream.to_json_summary()) + def list_streams(self): streams = self.kinesis_backend.list_streams() stream_names = [stream.stream_name for stream in streams] @@ -66,9 +71,10 @@ class KinesisResponse(BaseResponse): shard_iterator_type = self.parameters.get("ShardIteratorType") starting_sequence_number = self.parameters.get( "StartingSequenceNumber") + at_timestamp = self.parameters.get("Timestamp") shard_iterator = self.kinesis_backend.get_shard_iterator( - stream_name, shard_id, shard_iterator_type, starting_sequence_number, + stream_name, shard_id, shard_iterator_type, starting_sequence_number, at_timestamp ) return json.dumps({ @@ -79,12 +85,13 @@ class KinesisResponse(BaseResponse): shard_iterator = self.parameters.get("ShardIterator") limit = self.parameters.get("Limit") - next_shard_iterator, records = self.kinesis_backend.get_records( + next_shard_iterator, records, millis_behind_latest = self.kinesis_backend.get_records( shard_iterator, limit) return json.dumps({ "NextShardIterator": next_shard_iterator, - "Records": [record.to_json() for record in records] + "Records": [record.to_json() for record in records], + 
'MillisBehindLatest': millis_behind_latest }) def put_record(self): diff --git a/moto/kinesis/utils.py b/moto/kinesis/utils.py index 190371b2e..0c3edbb5a 100644 --- a/moto/kinesis/utils.py +++ b/moto/kinesis/utils.py @@ -1,9 +1,21 @@ +import sys import base64 from .exceptions import InvalidArgumentError -def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting_sequence_number): +if sys.version_info[0] == 2: + encode_method = base64.encodestring + decode_method = base64.decodestring +elif sys.version_info[0] == 3: + encode_method = base64.encodebytes + decode_method = base64.decodebytes +else: + raise Exception("Python version is not supported") + + +def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting_sequence_number, + at_timestamp): if shard_iterator_type == "AT_SEQUENCE_NUMBER": last_sequence_id = int(starting_sequence_number) - 1 elif shard_iterator_type == "AFTER_SEQUENCE_NUMBER": @@ -12,6 +24,8 @@ def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting last_sequence_id = 0 elif shard_iterator_type == "LATEST": last_sequence_id = shard.get_max_sequence_number() + elif shard_iterator_type == "AT_TIMESTAMP": + last_sequence_id = shard.get_sequence_number_at(at_timestamp) else: raise InvalidArgumentError( "Invalid ShardIteratorType: {0}".format(shard_iterator_type)) @@ -19,7 +33,7 @@ def compose_new_shard_iterator(stream_name, shard, shard_iterator_type, starting def compose_shard_iterator(stream_name, shard, last_sequence_id): - return base64.encodestring( + return encode_method( "{0}:{1}:{2}".format( stream_name, shard.shard_id, @@ -29,4 +43,4 @@ def compose_shard_iterator(stream_name, shard, last_sequence_id): def decompose_shard_iterator(shard_iterator): - return base64.decodestring(shard_iterator.encode("utf-8")).decode("utf-8").split(":") + return decode_method(shard_iterator.encode("utf-8")).decode("utf-8").split(":") diff --git a/moto/kms/exceptions.py b/moto/kms/exceptions.py new file mode 100644 index 000000000..70edd3dcd --- /dev/null +++ b/moto/kms/exceptions.py @@ -0,0 +1,36 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class NotFoundException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(NotFoundException, self).__init__( + "NotFoundException", message) + + +class ValidationException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(ValidationException, self).__init__( + "ValidationException", message) + + +class AlreadyExistsException(JsonRESTError): + code = 400 + + def __init__(self, message): + super(AlreadyExistsException, self).__init__( + "AlreadyExistsException", message) + + +class NotAuthorizedException(JsonRESTError): + code = 400 + + def __init__(self): + super(NotAuthorizedException, self).__init__( + "NotAuthorizedException", None) + + self.description = '{"__type":"NotAuthorizedException"}' diff --git a/moto/kms/models.py b/moto/kms/models.py index be8c52162..2d6245ad2 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -1,9 +1,12 @@ from __future__ import unicode_literals +import os import boto.kms from moto.core import BaseBackend, BaseModel +from moto.core.utils import iso_8601_datetime_without_milliseconds, unix_time from .utils import generate_key_id from collections import defaultdict +from datetime import datetime, timedelta class Key(BaseModel): @@ -12,11 +15,14 @@ class Key(BaseModel): self.id = generate_key_id() self.policy = policy self.key_usage = key_usage + 
self.key_state = "Enabled" self.description = description self.enabled = True self.region = region self.account_id = "0123456789012" self.key_rotation_status = False + self.deletion_date = None + self.tags = {} @property def physical_resource_id(self): @@ -27,17 +33,21 @@ class Key(BaseModel): return "arn:aws:kms:{0}:{1}:key/{2}".format(self.region, self.account_id, self.id) def to_dict(self): - return { + key_dict = { "KeyMetadata": { "AWSAccountId": self.account_id, "Arn": self.arn, - "CreationDate": "2015-01-01 00:00:00", + "CreationDate": "%d" % unix_time(), "Description": self.description, "Enabled": self.enabled, "KeyId": self.id, "KeyUsage": self.key_usage, + "KeyState": self.key_state, } } + if self.key_state == 'PendingDeletion': + key_dict['KeyMetadata']['DeletionDate'] = iso_8601_datetime_without_milliseconds(self.deletion_date) + return key_dict def delete(self, region_name): kms_backends[region_name].delete_key(self.id) @@ -55,9 +65,14 @@ class Key(BaseModel): ) key.key_rotation_status = properties['EnableKeyRotation'] key.enabled = properties['Enabled'] - return key + def get_cfn_attribute(self, attribute_name): + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + if attribute_name == 'Arn': + return self.arn + raise UnformattedGetAttTemplateException() + class KmsBackend(BaseBackend): @@ -70,6 +85,18 @@ class KmsBackend(BaseBackend): self.keys[key.id] = key return key + def update_key_description(self, key_id, description): + key = self.keys[self.get_key_id(key_id)] + key.description = description + + def tag_resource(self, key_id, tags): + key = self.keys[self.get_key_id(key_id)] + key.tags = tags + + def list_resource_tags(self, key_id): + key = self.keys[self.get_key_id(key_id)] + return key.tags + def delete_key(self, key_id): if key_id in self.keys: if key_id in self.key_to_aliases: @@ -103,8 +130,10 @@ class KmsBackend(BaseBackend): self.key_to_aliases[target_key_id].add(alias_name) def delete_alias(self, alias_name): + """Delete the alias.""" for aliases in self.key_to_aliases.values(): - aliases.remove(alias_name) + if alias_name in aliases: + aliases.remove(alias_name) def get_all_aliases(self): return self.key_to_aliases @@ -130,6 +159,40 @@ class KmsBackend(BaseBackend): def get_key_policy(self, key_id): return self.keys[self.get_key_id(key_id)].policy + def disable_key(self, key_id): + self.keys[key_id].enabled = False + self.keys[key_id].key_state = 'Disabled' + + def enable_key(self, key_id): + self.keys[key_id].enabled = True + self.keys[key_id].key_state = 'Enabled' + + def cancel_key_deletion(self, key_id): + self.keys[key_id].key_state = 'Disabled' + self.keys[key_id].deletion_date = None + + def schedule_key_deletion(self, key_id, pending_window_in_days): + if 7 <= pending_window_in_days <= 30: + self.keys[key_id].enabled = False + self.keys[key_id].key_state = 'PendingDeletion' + self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days) + return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date) + + def generate_data_key(self, key_id, encryption_context, number_of_bytes, key_spec, grant_tokens): + key = self.keys[self.get_key_id(key_id)] + + if key_spec: + if key_spec == 'AES_128': + bytes = 16 + else: + bytes = 32 + else: + bytes = number_of_bytes + + plaintext = os.urandom(bytes) + + return plaintext, key.arn + kms_backends = {} for region in boto.kms.regions(): diff --git a/moto/kms/responses.py b/moto/kms/responses.py index 0f544e954..92195ed6b 100644 --- 
a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -5,11 +5,9 @@ import json import re import six -from boto.exception import JSONResponseError -from boto.kms.exceptions import AlreadyExistsException, NotFoundException - from moto.core.responses import BaseResponse from .models import kms_backends +from .exceptions import NotFoundException, ValidationException, AlreadyExistsException, NotAuthorizedException reserved_aliases = [ 'alias/aws/ebs', @@ -38,6 +36,28 @@ class KmsResponse(BaseResponse): policy, key_usage, description, self.region) return json.dumps(key.to_dict()) + def update_key_description(self): + key_id = self.parameters.get('KeyId') + description = self.parameters.get('Description') + + self.kms_backend.update_key_description(key_id, description) + return json.dumps(None) + + def tag_resource(self): + key_id = self.parameters.get('KeyId') + tags = self.parameters.get('Tags') + self.kms_backend.tag_resource(key_id, tags) + return json.dumps({}) + + def list_resource_tags(self): + key_id = self.parameters.get('KeyId') + tags = self.kms_backend.list_resource_tags(key_id) + return json.dumps({ + "Tags": tags, + "NextMarker": None, + "Truncated": False, + }) + def describe_key(self): key_id = self.parameters.get('KeyId') try: @@ -66,36 +86,28 @@ class KmsResponse(BaseResponse): def create_alias(self): alias_name = self.parameters['AliasName'] target_key_id = self.parameters['TargetKeyId'] - region = self.region if not alias_name.startswith('alias/'): - raise JSONResponseError(400, 'Bad Request', - body={'message': 'Invalid identifier', '__type': 'ValidationException'}) + raise ValidationException('Invalid identifier') if alias_name in reserved_aliases: - raise JSONResponseError(400, 'Bad Request', body={ - '__type': 'NotAuthorizedException'}) + raise NotAuthorizedException() if ':' in alias_name: - raise JSONResponseError(400, 'Bad Request', body={ - 'message': '{alias_name} contains invalid characters for an alias'.format(**locals()), - '__type': 'ValidationException'}) + raise ValidationException('{alias_name} contains invalid characters for an alias'.format(alias_name=alias_name)) if not re.match(r'^[a-zA-Z0-9:/_-]+$', alias_name): - raise JSONResponseError(400, 'Bad Request', body={ - 'message': "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$" - .format(**locals()), - '__type': 'ValidationException'}) + raise ValidationException("1 validation error detected: Value '{alias_name}' at 'aliasName' " + "failed to satisfy constraint: Member must satisfy regular " + "expression pattern: ^[a-zA-Z0-9:/_-]+$" + .format(alias_name=alias_name)) if self.kms_backend.alias_exists(target_key_id): - raise JSONResponseError(400, 'Bad Request', body={ - 'message': 'Aliases must refer to keys. Not aliases', - '__type': 'ValidationException'}) + raise ValidationException('Aliases must refer to keys. 
Not aliases') if self.kms_backend.alias_exists(alias_name): - raise AlreadyExistsException(400, 'Bad Request', body={ - 'message': 'An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} already exists' - .format(**locals()), '__type': 'AlreadyExistsException'}) + raise AlreadyExistsException('An alias with the name arn:aws:kms:{region}:012345678912:{alias_name} ' + 'already exists'.format(region=self.region, alias_name=alias_name)) self.kms_backend.add_alias(target_key_id, alias_name) @@ -103,16 +115,13 @@ class KmsResponse(BaseResponse): def delete_alias(self): alias_name = self.parameters['AliasName'] - region = self.region if not alias_name.startswith('alias/'): - raise JSONResponseError(400, 'Bad Request', - body={'message': 'Invalid identifier', '__type': 'ValidationException'}) + raise ValidationException('Invalid identifier') if not self.kms_backend.alias_exists(alias_name): - raise NotFoundException(400, 'Bad Request', body={ - 'message': 'Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format(**locals()), - '__type': 'NotFoundException'}) + raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:' + '{alias_name} is not found.'.format(region=self.region, alias_name=alias_name)) self.kms_backend.delete_alias(alias_name) @@ -150,9 +159,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.enable_key_rotation(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) @@ -162,9 +170,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.disable_key_rotation(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) def get_key_rotation_status(self): @@ -173,9 +180,8 @@ class KmsResponse(BaseResponse): try: rotation_enabled = self.kms_backend.get_key_rotation_status(key_id) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps({'KeyRotationEnabled': rotation_enabled}) def put_key_policy(self): @@ -188,9 +194,8 @@ class KmsResponse(BaseResponse): try: self.kms_backend.put_key_policy(key_id, policy) except KeyError: - raise JSONResponseError(404, 'Not Found', body={ - 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), - '__type': 'NotFoundException'}) + raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/" + "{key_id}' does not exist".format(region=self.region, key_id=key_id)) return json.dumps(None) @@ -203,9 +208,8 @@ class KmsResponse(BaseResponse): try: return json.dumps({'Policy': self.kms_backend.get_key_policy(key_id)}) except 
KeyError:
-            raise JSONResponseError(404, 'Not Found', body={
-                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
-                '__type': 'NotFoundException'})
+            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
+                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
 
     def list_key_policies(self):
         key_id = self.parameters.get('KeyId')
@@ -213,9 +217,8 @@ class KmsResponse(BaseResponse):
         try:
             self.kms_backend.describe_key(key_id)
         except KeyError:
-            raise JSONResponseError(404, 'Not Found', body={
-                'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id),
-                '__type': 'NotFoundException'})
+            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
+                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
 
         return json.dumps({'Truncated': False, 'PolicyNames': ['default']})
 
@@ -227,21 +230,117 @@ class KmsResponse(BaseResponse):
         value = self.parameters.get("Plaintext")
         if isinstance(value, six.text_type):
             value = value.encode('utf-8')
-        return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8")})
+        return json.dumps({"CiphertextBlob": base64.b64encode(value).decode("utf-8"), 'KeyId': 'key_id'})
 
     def decrypt(self):
+        # TODO: refuse to decrypt if the EncryptionContext differs from the one used when encrypting / generating
+
         value = self.parameters.get("CiphertextBlob")
-        return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")})
+        try:
+            return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")})
+        except UnicodeDecodeError:
+            # generate_data_key produces random bytes that won't decode as UTF-8, so return them as-is (still base64)
+            return json.dumps({"Plaintext": value})
+
+    def disable_key(self):
+        key_id = self.parameters.get('KeyId')
+        _assert_valid_key_id(self.kms_backend.get_key_id(key_id))
+        try:
+            self.kms_backend.disable_key(key_id)
+        except KeyError:
+            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
+                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
+        return json.dumps(None)
+
+    def enable_key(self):
+        key_id = self.parameters.get('KeyId')
+        _assert_valid_key_id(self.kms_backend.get_key_id(key_id))
+        try:
+            self.kms_backend.enable_key(key_id)
+        except KeyError:
+            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
+                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
+        return json.dumps(None)
+
+    def cancel_key_deletion(self):
+        key_id = self.parameters.get('KeyId')
+        _assert_valid_key_id(self.kms_backend.get_key_id(key_id))
+        try:
+            self.kms_backend.cancel_key_deletion(key_id)
+        except KeyError:
+            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
+                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
+        return json.dumps({'KeyId': key_id})
+
+    def schedule_key_deletion(self):
+        key_id = self.parameters.get('KeyId')
+        if self.parameters.get('PendingWindowInDays') is None:
+            pending_window_in_days = 30
+        else:
+            pending_window_in_days = self.parameters.get('PendingWindowInDays')
+        _assert_valid_key_id(self.kms_backend.get_key_id(key_id))
+        try:
+            return json.dumps({
+                'KeyId': key_id,
+                'DeletionDate': self.kms_backend.schedule_key_deletion(key_id, pending_window_in_days)
+            })
+        except KeyError:
+            raise NotFoundException("Key 'arn:aws:kms:{region}:012345678912:key/"
+                                    "{key_id}' does not exist".format(region=self.region, key_id=key_id))
+
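disable_key, enable_key, cancel_key_deletion and schedule_key_deletion drive the key_state field added to the Key model earlier in this diff. A minimal sketch of the lifecycle, not part of the diff, assuming @mock_kms:

import boto3
from moto import mock_kms

@mock_kms
def key_deletion_lifecycle():
    client = boto3.client('kms', region_name='us-east-1')
    key_id = client.create_key()['KeyMetadata']['KeyId']
    resp = client.schedule_key_deletion(KeyId=key_id, PendingWindowInDays=7)
    assert 'DeletionDate' in resp
    assert client.describe_key(KeyId=key_id)['KeyMetadata']['KeyState'] == 'PendingDeletion'
    client.cancel_key_deletion(KeyId=key_id)
    # cancelling leaves the key disabled until enable_key is called
    assert client.describe_key(KeyId=key_id)['KeyMetadata']['KeyState'] == 'Disabled'
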
generate_data_key(self): + key_id = self.parameters.get('KeyId') + encryption_context = self.parameters.get('EncryptionContext') + number_of_bytes = self.parameters.get('NumberOfBytes') + key_spec = self.parameters.get('KeySpec') + grant_tokens = self.parameters.get('GrantTokens') + + # Param validation + if key_id.startswith('alias'): + if self.kms_backend.get_key_id_from_alias(key_id) is None: + raise NotFoundException('Alias arn:aws:kms:{region}:012345678912:{alias_name} is not found.'.format( + region=self.region, alias_name=key_id)) + else: + if self.kms_backend.get_key_id(key_id) not in self.kms_backend.keys: + raise NotFoundException('Invalid keyId') + + if number_of_bytes and (number_of_bytes > 1024 or number_of_bytes < 0): + raise ValidationException("1 validation error detected: Value '2048' at 'numberOfBytes' failed " + "to satisfy constraint: Member must have value less than or " + "equal to 1024") + + if key_spec and key_spec not in ('AES_256', 'AES_128'): + raise ValidationException("1 validation error detected: Value 'AES_257' at 'keySpec' failed " + "to satisfy constraint: Member must satisfy enum value set: " + "[AES_256, AES_128]") + if not key_spec and not number_of_bytes: + raise ValidationException("Please specify either number of bytes or key spec.") + if key_spec and number_of_bytes: + raise ValidationException("Please specify either number of bytes or key spec.") + + plaintext, key_arn = self.kms_backend.generate_data_key(key_id, encryption_context, + number_of_bytes, key_spec, grant_tokens) + + plaintext = base64.b64encode(plaintext).decode() + + return json.dumps({ + 'CiphertextBlob': plaintext, + 'Plaintext': plaintext, + 'KeyId': key_arn # not alias + }) + + def generate_data_key_without_plaintext(self): + result = json.loads(self.generate_data_key()) + del result['Plaintext'] + + return json.dumps(result) def _assert_valid_key_id(key_id): if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE): - raise JSONResponseError(404, 'Not Found', body={ - 'message': ' Invalid keyId', '__type': 'NotFoundException'}) + raise NotFoundException('Invalid keyId') def _assert_default_policy(policy_name): if policy_name != 'default': - raise JSONResponseError(404, 'Not Found', body={ - 'message': "No such policy exists", - '__type': 'NotFoundException'}) + raise NotFoundException("No such policy exists") diff --git a/moto/logs/exceptions.py b/moto/logs/exceptions.py new file mode 100644 index 000000000..bb02eced3 --- /dev/null +++ b/moto/logs/exceptions.py @@ -0,0 +1,33 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class LogsClientError(JsonRESTError): + code = 400 + + +class ResourceNotFoundException(LogsClientError): + def __init__(self): + self.code = 400 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The specified resource does not exist" + ) + + +class InvalidParameterException(LogsClientError): + def __init__(self, msg=None): + self.code = 400 + super(InvalidParameterException, self).__init__( + "InvalidParameterException", + msg or "A parameter is specified incorrectly." 
+ ) + + +class ResourceAlreadyExistsException(LogsClientError): + def __init__(self): + self.code = 400 + super(ResourceAlreadyExistsException, self).__init__( + 'ResourceAlreadyExistsException', + 'The specified log group already exists' + ) diff --git a/moto/logs/models.py b/moto/logs/models.py index 09dcb3645..a44b76812 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -1,6 +1,10 @@ from moto.core import BaseBackend import boto.logs from moto.core.utils import unix_time_millis +from .exceptions import ( + ResourceNotFoundException, + ResourceAlreadyExistsException +) class LogEvent: @@ -15,7 +19,7 @@ class LogEvent: def to_filter_dict(self): return { - "eventId": self.eventId, + "eventId": str(self.eventId), "ingestionTime": self.ingestionTime, # "logStreamName": "message": self.message, @@ -49,23 +53,29 @@ class LogStream: self.__class__._log_ids += 1 def _update(self): - self.firstEventTimestamp = min([x.timestamp for x in self.events]) - self.lastEventTimestamp = max([x.timestamp for x in self.events]) + # events can be empty when stream is described soon after creation + self.firstEventTimestamp = min([x.timestamp for x in self.events]) if self.events else None + self.lastEventTimestamp = max([x.timestamp for x in self.events]) if self.events else None def to_describe_dict(self): # Compute start and end times self._update() - return { + res = { "arn": self.arn, "creationTime": self.creationTime, - "firstEventTimestamp": self.firstEventTimestamp, - "lastEventTimestamp": self.lastEventTimestamp, - "lastIngestionTime": self.lastIngestionTime, "logStreamName": self.logStreamName, "storedBytes": self.storedBytes, - "uploadSequenceToken": str(self.uploadSequenceToken), } + if self.events: + rest = { + "firstEventTimestamp": self.firstEventTimestamp, + "lastEventTimestamp": self.lastEventTimestamp, + "lastIngestionTime": self.lastIngestionTime, + "uploadSequenceToken": str(self.uploadSequenceToken), + } + res.update(rest) + return res def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): # TODO: ensure sequence_token @@ -76,7 +86,7 @@ class LogStream: self.events += [LogEvent(self.lastIngestionTime, log_event) for log_event in log_events] self.uploadSequenceToken += 1 - return self.uploadSequenceToken + return '{:056d}'.format(self.uploadSequenceToken) def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): def filter_func(event): @@ -122,22 +132,30 @@ class LogGroup: def __init__(self, region, name, tags): self.name = name self.region = region + self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format( + region=region, log_group=name) + self.creationTime = unix_time_millis() self.tags = tags self.streams = dict() # {name: LogStream} + self.retentionInDays = None # AWS defaults to Never Expire for log group retention def create_log_stream(self, log_stream_name): - assert log_stream_name not in self.streams + if log_stream_name in self.streams: + raise ResourceAlreadyExistsException() self.streams[log_stream_name] = LogStream(self.region, self.name, log_stream_name) def delete_log_stream(self, log_stream_name): - assert log_stream_name in self.streams + if log_stream_name not in self.streams: + raise ResourceNotFoundException() del self.streams[log_stream_name] def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): + # responses only logStreamName, creationTime, arn, storedBytes when no events are stored. 
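        # Sketch of the client-visible effect (illustrative names, not taken
        # from this patch): a stream described before any put_log_events call
        # carries only the static fields, e.g.
        #
        #   import boto3
        #   client = boto3.client('logs', region_name='us-east-1')
        #   client.create_log_group(logGroupName='example-group')
        #   client.create_log_stream(logGroupName='example-group',
        #                            logStreamName='example-stream')
        #   stream = client.describe_log_streams(
        #       logGroupName='example-group')['logStreams'][0]
        #   assert 'firstEventTimestamp' not in stream  # set only once events exist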
+ log_streams = [(name, stream.to_describe_dict()) for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)] def sorter(item): - return item[0] if order_by == 'logStreamName' else item[1]['lastEventTimestamp'] + return item[0] if order_by == 'logStreamName' else item[1].get('lastEventTimestamp', 0) if next_token is None: next_token = 0 @@ -151,18 +169,18 @@ class LogGroup: return log_streams_page, new_token def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): - assert log_stream_name in self.streams + if log_stream_name not in self.streams: + raise ResourceNotFoundException() stream = self.streams[log_stream_name] return stream.put_log_events(log_group_name, log_stream_name, log_events, sequence_token) def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): - assert log_stream_name in self.streams + if log_stream_name not in self.streams: + raise ResourceNotFoundException() stream = self.streams[log_stream_name] return stream.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved): - assert not filter_pattern # TODO: impl - streams = [stream for name, stream in self.streams.items() if not log_stream_names or name in log_stream_names] events = [] @@ -170,7 +188,7 @@ class LogGroup: events += stream.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) if interleaved: - events = sorted(events, key=lambda event: event.timestamp) + events = sorted(events, key=lambda event: event['timestamp']) if next_token is None: next_token = 0 @@ -183,6 +201,22 @@ class LogGroup: searched_streams = [{"logStreamName": stream.logStreamName, "searchedCompletely": True} for stream in streams] return events_page, next_token, searched_streams + def to_describe_dict(self): + log_group = { + "arn": self.arn, + "creationTime": self.creationTime, + "logGroupName": self.name, + "metricFilterCount": 0, + "storedBytes": sum(s.storedBytes for s in self.streams.values()), + } + # AWS only returns retentionInDays if a value is set for the log group (ie. 
not Never Expire) + if self.retentionInDays: + log_group["retentionInDays"] = self.retentionInDays + return log_group + + def set_retention_policy(self, retention_in_days): + self.retentionInDays = retention_in_days + class LogsBackend(BaseBackend): def __init__(self, region_name): @@ -195,7 +229,8 @@ class LogsBackend(BaseBackend): self.__init__(region_name) def create_log_group(self, log_group_name, tags): - assert log_group_name not in self.groups + if log_group_name in self.groups: + raise ResourceAlreadyExistsException() self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags) def ensure_log_group(self, log_group_name, tags): @@ -204,39 +239,74 @@ class LogsBackend(BaseBackend): self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags) def delete_log_group(self, log_group_name): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() del self.groups[log_group_name] + def describe_log_groups(self, limit, log_group_name_prefix, next_token): + if log_group_name_prefix is None: + log_group_name_prefix = '' + if next_token is None: + next_token = 0 + + groups = [group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix)] + groups = sorted(groups, key=lambda x: x['creationTime'], reverse=True) + groups_page = groups[next_token:next_token + limit] + + next_token += limit + if next_token >= len(groups): + next_token = None + + return groups_page, next_token + def create_log_stream(self, log_group_name, log_stream_name): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.create_log_stream(log_stream_name) def delete_log_stream(self, log_group_name, log_stream_name): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.delete_log_stream(log_stream_name) def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.describe_log_streams(descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by) def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): # TODO: add support for sequence_tokens - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.put_log_events(log_group_name, log_stream_name, log_events, sequence_token) def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return 
log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) + def put_retention_policy(self, log_group_name, retention_in_days): + if log_group_name not in self.groups: + raise ResourceNotFoundException() + log_group = self.groups[log_group_name] + return log_group.set_retention_policy(retention_in_days) + + def delete_retention_policy(self, log_group_name): + if log_group_name not in self.groups: + raise ResourceNotFoundException() + log_group = self.groups[log_group_name] + return log_group.set_retention_policy(None) + logs_backends = {region.name: LogsBackend(region.name) for region in boto.logs.regions()} diff --git a/moto/logs/responses.py b/moto/logs/responses.py index e0a17f5f8..39f24a260 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -33,6 +33,18 @@ class LogsResponse(BaseResponse): self.logs_backend.delete_log_group(log_group_name) return '' + def describe_log_groups(self): + log_group_name_prefix = self._get_param('logGroupNamePrefix') + next_token = self._get_param('nextToken') + limit = self._get_param('limit', 50) + assert limit <= 50 + groups, next_token = self.logs_backend.describe_log_groups( + limit, log_group_name_prefix, next_token) + return json.dumps({ + "logGroups": groups, + "nextToken": next_token + }) + def create_log_stream(self): log_group_name = self._get_param('logGroupName') log_stream_name = self._get_param('logStreamName') @@ -87,9 +99,8 @@ class LogsResponse(BaseResponse): events, next_backward_token, next_foward_token = \ self.logs_backend.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) - return json.dumps({ - "events": [ob.__dict__ for ob in events], + "events": events, "nextBackwardToken": next_backward_token, "nextForwardToken": next_foward_token }) @@ -112,3 +123,14 @@ class LogsResponse(BaseResponse): "nextToken": next_token, "searchedLogStreams": searched_streams }) + + def put_retention_policy(self): + log_group_name = self._get_param('logGroupName') + retention_in_days = self._get_param('retentionInDays') + self.logs_backend.put_retention_policy(log_group_name, retention_in_days) + return '' + + def delete_retention_policy(self): + log_group_name = self._get_param('logGroupName') + self.logs_backend.delete_retention_policy(log_group_name) + return '' diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py index fe8c882a7..4fe428c65 100644 --- a/moto/opsworks/models.py +++ b/moto/opsworks/models.py @@ -398,11 +398,82 @@ class Stack(BaseModel): return response +class App(BaseModel): + + def __init__(self, stack_id, name, type, + shortname=None, + description=None, + datasources=None, + app_source=None, + domains=None, + enable_ssl=False, + ssl_configuration=None, + attributes=None, + environment=None): + self.stack_id = stack_id + self.name = name + self.type = type + self.shortname = shortname + self.description = description + + self.datasources = datasources + if datasources is None: + self.datasources = [] + + self.app_source = app_source + if app_source is None: + self.app_source = {} + + self.domains = domains + if domains is None: + self.domains = [] + + self.enable_ssl = enable_ssl + + self.ssl_configuration = ssl_configuration + if ssl_configuration is None: + self.ssl_configuration = {} + + self.attributes = attributes + if attributes is None: + self.attributes = {} + + self.environment = environment + if environment is None: + self.environment = {} + + self.id = 
"{0}".format(uuid.uuid4()) + self.created_at = datetime.datetime.utcnow() + + def __eq__(self, other): + return self.id == other.id + + def to_dict(self): + d = { + "AppId": self.id, + "AppSource": self.app_source, + "Attributes": self.attributes, + "CreatedAt": self.created_at.isoformat(), + "Datasources": self.datasources, + "Description": self.description, + "Domains": self.domains, + "EnableSsl": self.enable_ssl, + "Environment": self.environment, + "Name": self.name, + "Shortname": self.shortname, + "SslConfiguration": self.ssl_configuration, + "StackId": self.stack_id, + "Type": self.type + } + return d + + class OpsWorksBackend(BaseBackend): def __init__(self, ec2_backend): self.stacks = {} self.layers = {} + self.apps = {} self.instances = {} self.ec2_backend = ec2_backend @@ -435,6 +506,20 @@ class OpsWorksBackend(BaseBackend): self.stacks[stackid].layers.append(layer) return layer + def create_app(self, **kwargs): + name = kwargs['name'] + stackid = kwargs['stack_id'] + if stackid not in self.stacks: + raise ResourceNotFoundException(stackid) + if name in [a.name for a in self.stacks[stackid].apps]: + raise ValidationException( + 'There is already an app named "{0}" ' + 'for this stack'.format(name)) + app = App(**kwargs) + self.apps[app.id] = app + self.stacks[stackid].apps.append(app) + return app + def create_instance(self, **kwargs): stack_id = kwargs['stack_id'] layer_ids = kwargs['layer_ids'] @@ -502,6 +587,22 @@ class OpsWorksBackend(BaseBackend): raise ResourceNotFoundException(", ".join(unknown_layers)) return [self.layers[id].to_dict() for id in layer_ids] + def describe_apps(self, stack_id, app_ids): + if stack_id is not None and app_ids is not None: + raise ValidationException( + "Please provide one or more app IDs or a stack ID" + ) + if stack_id is not None: + if stack_id not in self.stacks: + raise ResourceNotFoundException( + "Unable to find stack with ID {0}".format(stack_id)) + return [app.to_dict() for app in self.stacks[stack_id].apps] + + unknown_apps = set(app_ids) - set(self.apps.keys()) + if unknown_apps: + raise ResourceNotFoundException(", ".join(unknown_apps)) + return [self.apps[id].to_dict() for id in app_ids] + def describe_instances(self, instance_ids, layer_id, stack_id): if len(list(filter(None, (instance_ids, layer_id, stack_id)))) != 1: raise ValidationException("Please provide either one or more " diff --git a/moto/opsworks/responses.py b/moto/opsworks/responses.py index 42e0f2c5c..c9f8fe125 100644 --- a/moto/opsworks/responses.py +++ b/moto/opsworks/responses.py @@ -75,6 +75,24 @@ class OpsWorksResponse(BaseResponse): layer = self.opsworks_backend.create_layer(**kwargs) return json.dumps({"LayerId": layer.id}, indent=1) + def create_app(self): + kwargs = dict( + stack_id=self.parameters.get('StackId'), + name=self.parameters.get('Name'), + type=self.parameters.get('Type'), + shortname=self.parameters.get('Shortname'), + description=self.parameters.get('Description'), + datasources=self.parameters.get('DataSources'), + app_source=self.parameters.get('AppSource'), + domains=self.parameters.get('Domains'), + enable_ssl=self.parameters.get('EnableSsl'), + ssl_configuration=self.parameters.get('SslConfiguration'), + attributes=self.parameters.get('Attributes'), + environment=self.parameters.get('Environment') + ) + app = self.opsworks_backend.create_app(**kwargs) + return json.dumps({"AppId": app.id}, indent=1) + def create_instance(self): kwargs = dict( stack_id=self.parameters.get("StackId"), @@ -110,6 +128,12 @@ class 
OpsWorksResponse(BaseResponse): layers = self.opsworks_backend.describe_layers(stack_id, layer_ids) return json.dumps({"Layers": layers}, indent=1) + def describe_apps(self): + stack_id = self.parameters.get("StackId") + app_ids = self.parameters.get("AppIds") + apps = self.opsworks_backend.describe_apps(stack_id, app_ids) + return json.dumps({"Apps": apps}, indent=1) + def describe_instances(self): instance_ids = self.parameters.get("InstanceIds") layer_id = self.parameters.get("LayerId") diff --git a/moto/organizations/__init__.py b/moto/organizations/__init__.py new file mode 100644 index 000000000..372782dd3 --- /dev/null +++ b/moto/organizations/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import organizations_backend +from ..core.models import base_decorator + +organizations_backends = {"global": organizations_backend} +mock_organizations = base_decorator(organizations_backends) diff --git a/moto/organizations/models.py b/moto/organizations/models.py new file mode 100644 index 000000000..91004b9ba --- /dev/null +++ b/moto/organizations/models.py @@ -0,0 +1,443 @@ +from __future__ import unicode_literals + +import datetime +import re + +from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import RESTError +from moto.core.utils import unix_time +from moto.organizations import utils + + +class FakeOrganization(BaseModel): + + def __init__(self, feature_set): + self.id = utils.make_random_org_id() + self.root_id = utils.make_random_root_id() + self.feature_set = feature_set + self.master_account_id = utils.MASTER_ACCOUNT_ID + self.master_account_email = utils.MASTER_ACCOUNT_EMAIL + self.available_policy_types = [{ + 'Type': 'SERVICE_CONTROL_POLICY', + 'Status': 'ENABLED' + }] + + @property + def arn(self): + return utils.ORGANIZATION_ARN_FORMAT.format(self.master_account_id, self.id) + + @property + def master_account_arn(self): + return utils.MASTER_ACCOUNT_ARN_FORMAT.format(self.master_account_id, self.id) + + def describe(self): + return { + 'Organization': { + 'Id': self.id, + 'Arn': self.arn, + 'FeatureSet': self.feature_set, + 'MasterAccountArn': self.master_account_arn, + 'MasterAccountId': self.master_account_id, + 'MasterAccountEmail': self.master_account_email, + 'AvailablePolicyTypes': self.available_policy_types, + } + } + + +class FakeAccount(BaseModel): + + def __init__(self, organization, **kwargs): + self.type = 'ACCOUNT' + self.organization_id = organization.id + self.master_account_id = organization.master_account_id + self.create_account_status_id = utils.make_random_create_account_status_id() + self.id = utils.make_random_account_id() + self.name = kwargs['AccountName'] + self.email = kwargs['Email'] + self.create_time = datetime.datetime.utcnow() + self.status = 'ACTIVE' + self.joined_method = 'CREATED' + self.parent_id = organization.root_id + self.attached_policies = [] + + @property + def arn(self): + return utils.ACCOUNT_ARN_FORMAT.format( + self.master_account_id, + self.organization_id, + self.id + ) + + @property + def create_account_status(self): + return { + 'CreateAccountStatus': { + 'Id': self.create_account_status_id, + 'AccountName': self.name, + 'State': 'SUCCEEDED', + 'RequestedTimestamp': unix_time(self.create_time), + 'CompletedTimestamp': unix_time(self.create_time), + 'AccountId': self.id, + } + } + + def describe(self): + return { + 'Account': { + 'Id': self.id, + 'Arn': self.arn, + 'Email': self.email, + 'Name': self.name, + 'Status': self.status, + 'JoinedMethod': self.joined_method, + 
'JoinedTimestamp': unix_time(self.create_time), + } + } + + +class FakeOrganizationalUnit(BaseModel): + + def __init__(self, organization, **kwargs): + self.type = 'ORGANIZATIONAL_UNIT' + self.organization_id = organization.id + self.master_account_id = organization.master_account_id + self.id = utils.make_random_ou_id(organization.root_id) + self.name = kwargs.get('Name') + self.parent_id = kwargs.get('ParentId') + self._arn_format = utils.OU_ARN_FORMAT + self.attached_policies = [] + + @property + def arn(self): + return self._arn_format.format( + self.master_account_id, + self.organization_id, + self.id + ) + + def describe(self): + return { + 'OrganizationalUnit': { + 'Id': self.id, + 'Arn': self.arn, + 'Name': self.name, + } + } + + +class FakeRoot(FakeOrganizationalUnit): + + def __init__(self, organization, **kwargs): + super(FakeRoot, self).__init__(organization, **kwargs) + self.type = 'ROOT' + self.id = organization.root_id + self.name = 'Root' + self.policy_types = [{ + 'Type': 'SERVICE_CONTROL_POLICY', + 'Status': 'ENABLED' + }] + self._arn_format = utils.ROOT_ARN_FORMAT + self.attached_policies = [] + + def describe(self): + return { + 'Id': self.id, + 'Arn': self.arn, + 'Name': self.name, + 'PolicyTypes': self.policy_types + } + + +class FakeServiceControlPolicy(BaseModel): + + def __init__(self, organization, **kwargs): + self.type = 'POLICY' + self.content = kwargs.get('Content') + self.description = kwargs.get('Description') + self.name = kwargs.get('Name') + self.type = kwargs.get('Type') + self.id = utils.make_random_service_control_policy_id() + self.aws_managed = False + self.organization_id = organization.id + self.master_account_id = organization.master_account_id + self._arn_format = utils.SCP_ARN_FORMAT + self.attachments = [] + + @property + def arn(self): + return self._arn_format.format( + self.master_account_id, + self.organization_id, + self.id + ) + + def describe(self): + return { + 'Policy': { + 'PolicySummary': { + 'Id': self.id, + 'Arn': self.arn, + 'Name': self.name, + 'Description': self.description, + 'Type': self.type, + 'AwsManaged': self.aws_managed, + }, + 'Content': self.content + } + } + + +class OrganizationsBackend(BaseBackend): + + def __init__(self): + self.org = None + self.accounts = [] + self.ou = [] + self.policies = [] + + def create_organization(self, **kwargs): + self.org = FakeOrganization(kwargs['FeatureSet']) + self.ou.append(FakeRoot(self.org)) + return self.org.describe() + + def describe_organization(self): + if not self.org: + raise RESTError( + 'AWSOrganizationsNotInUseException', + "Your account is not a member of an organization." + ) + return self.org.describe() + + def list_roots(self): + return dict( + Roots=[ou.describe() for ou in self.ou if isinstance(ou, FakeRoot)] + ) + + def create_organizational_unit(self, **kwargs): + new_ou = FakeOrganizationalUnit(self.org, **kwargs) + self.ou.append(new_ou) + return new_ou.describe() + + def get_organizational_unit_by_id(self, ou_id): + ou = next((ou for ou in self.ou if ou.id == ou_id), None) + if ou is None: + raise RESTError( + 'OrganizationalUnitNotFoundException', + "You specified an organizational unit that doesn't exist." + ) + return ou + + def validate_parent_id(self, parent_id): + try: + self.get_organizational_unit_by_id(parent_id) + except RESTError: + raise RESTError( + 'ParentNotFoundException', + "You specified parent that doesn't exist." 
+ ) + return parent_id + + def describe_organizational_unit(self, **kwargs): + ou = self.get_organizational_unit_by_id(kwargs['OrganizationalUnitId']) + return ou.describe() + + def list_organizational_units_for_parent(self, **kwargs): + parent_id = self.validate_parent_id(kwargs['ParentId']) + return dict( + OrganizationalUnits=[ + { + 'Id': ou.id, + 'Arn': ou.arn, + 'Name': ou.name, + } + for ou in self.ou + if ou.parent_id == parent_id + ] + ) + + def create_account(self, **kwargs): + new_account = FakeAccount(self.org, **kwargs) + self.accounts.append(new_account) + return new_account.create_account_status + + def get_account_by_id(self, account_id): + account = next(( + account for account in self.accounts + if account.id == account_id + ), None) + if account is None: + raise RESTError( + 'AccountNotFoundException', + "You specified an account that doesn't exist." + ) + return account + + def describe_account(self, **kwargs): + account = self.get_account_by_id(kwargs['AccountId']) + return account.describe() + + def list_accounts(self): + return dict( + Accounts=[account.describe()['Account'] for account in self.accounts] + ) + + def list_accounts_for_parent(self, **kwargs): + parent_id = self.validate_parent_id(kwargs['ParentId']) + return dict( + Accounts=[ + account.describe()['Account'] + for account in self.accounts + if account.parent_id == parent_id + ] + ) + + def move_account(self, **kwargs): + new_parent_id = self.validate_parent_id(kwargs['DestinationParentId']) + self.validate_parent_id(kwargs['SourceParentId']) + account = self.get_account_by_id(kwargs['AccountId']) + index = self.accounts.index(account) + self.accounts[index].parent_id = new_parent_id + + def list_parents(self, **kwargs): + if re.compile(r'[0-9]{12}').match(kwargs['ChildId']): + child_object = self.get_account_by_id(kwargs['ChildId']) + else: + child_object = self.get_organizational_unit_by_id(kwargs['ChildId']) + return dict( + Parents=[ + { + 'Id': ou.id, + 'Type': ou.type, + } + for ou in self.ou + if ou.id == child_object.parent_id + ] + ) + + def list_children(self, **kwargs): + parent_id = self.validate_parent_id(kwargs['ParentId']) + if kwargs['ChildType'] == 'ACCOUNT': + obj_list = self.accounts + elif kwargs['ChildType'] == 'ORGANIZATIONAL_UNIT': + obj_list = self.ou + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' + ) + return dict( + Children=[ + { + 'Id': obj.id, + 'Type': kwargs['ChildType'], + } + for obj in obj_list + if obj.parent_id == parent_id + ] + ) + + def create_policy(self, **kwargs): + new_policy = FakeServiceControlPolicy(self.org, **kwargs) + self.policies.append(new_policy) + return new_policy.describe() + + def describe_policy(self, **kwargs): + if re.compile(utils.SCP_ID_REGEX).match(kwargs['PolicyId']): + policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None) + if policy is None: + raise RESTError( + 'PolicyNotFoundException', + "You specified a policy that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' 
+ ) + return policy.describe() + + def attach_policy(self, **kwargs): + policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None) + if (re.compile(utils.ROOT_ID_REGEX).match(kwargs['TargetId']) or + re.compile(utils.OU_ID_REGEX).match(kwargs['TargetId'])): + ou = next((ou for ou in self.ou if ou.id == kwargs['TargetId']), None) + if ou is not None: + if ou not in ou.attached_policies: + ou.attached_policies.append(policy) + policy.attachments.append(ou) + else: + raise RESTError( + 'OrganizationalUnitNotFoundException', + "You specified an organizational unit that doesn't exist." + ) + elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs['TargetId']): + account = next((a for a in self.accounts if a.id == kwargs['TargetId']), None) + if account is not None: + if account not in account.attached_policies: + account.attached_policies.append(policy) + policy.attachments.append(account) + else: + raise RESTError( + 'AccountNotFoundException', + "You specified an account that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' + ) + + def list_policies(self, **kwargs): + return dict(Policies=[ + p.describe()['Policy']['PolicySummary'] for p in self.policies + ]) + + def list_policies_for_target(self, **kwargs): + if re.compile(utils.OU_ID_REGEX).match(kwargs['TargetId']): + obj = next((ou for ou in self.ou if ou.id == kwargs['TargetId']), None) + if obj is None: + raise RESTError( + 'OrganizationalUnitNotFoundException', + "You specified an organizational unit that doesn't exist." + ) + elif re.compile(utils.ACCOUNT_ID_REGEX).match(kwargs['TargetId']): + obj = next((a for a in self.accounts if a.id == kwargs['TargetId']), None) + if obj is None: + raise RESTError( + 'AccountNotFoundException', + "You specified an account that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' + ) + return dict(Policies=[ + p.describe()['Policy']['PolicySummary'] for p in obj.attached_policies + ]) + + def list_targets_for_policy(self, **kwargs): + if re.compile(utils.SCP_ID_REGEX).match(kwargs['PolicyId']): + policy = next((p for p in self.policies if p.id == kwargs['PolicyId']), None) + if policy is None: + raise RESTError( + 'PolicyNotFoundException', + "You specified a policy that doesn't exist." + ) + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' 
+ ) + objects = [ + { + 'TargetId': obj.id, + 'Arn': obj.arn, + 'Name': obj.name, + 'Type': obj.type, + } for obj in policy.attachments + ] + return dict(Targets=objects) + + +organizations_backend = OrganizationsBackend() diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py new file mode 100644 index 000000000..814f30bad --- /dev/null +++ b/moto/organizations/responses.py @@ -0,0 +1,117 @@ +from __future__ import unicode_literals +import json + +from moto.core.responses import BaseResponse +from .models import organizations_backend + + +class OrganizationsResponse(BaseResponse): + + @property + def organizations_backend(self): + return organizations_backend + + @property + def request_params(self): + try: + return json.loads(self.body) + except ValueError: + return {} + + def _get_param(self, param, default=None): + return self.request_params.get(param, default) + + def create_organization(self): + return json.dumps( + self.organizations_backend.create_organization(**self.request_params) + ) + + def describe_organization(self): + return json.dumps( + self.organizations_backend.describe_organization() + ) + + def list_roots(self): + return json.dumps( + self.organizations_backend.list_roots() + ) + + def create_organizational_unit(self): + return json.dumps( + self.organizations_backend.create_organizational_unit(**self.request_params) + ) + + def describe_organizational_unit(self): + return json.dumps( + self.organizations_backend.describe_organizational_unit(**self.request_params) + ) + + def list_organizational_units_for_parent(self): + return json.dumps( + self.organizations_backend.list_organizational_units_for_parent(**self.request_params) + ) + + def list_parents(self): + return json.dumps( + self.organizations_backend.list_parents(**self.request_params) + ) + + def create_account(self): + return json.dumps( + self.organizations_backend.create_account(**self.request_params) + ) + + def describe_account(self): + return json.dumps( + self.organizations_backend.describe_account(**self.request_params) + ) + + def list_accounts(self): + return json.dumps( + self.organizations_backend.list_accounts() + ) + + def list_accounts_for_parent(self): + return json.dumps( + self.organizations_backend.list_accounts_for_parent(**self.request_params) + ) + + def move_account(self): + return json.dumps( + self.organizations_backend.move_account(**self.request_params) + ) + + def list_children(self): + return json.dumps( + self.organizations_backend.list_children(**self.request_params) + ) + + def create_policy(self): + return json.dumps( + self.organizations_backend.create_policy(**self.request_params) + ) + + def describe_policy(self): + return json.dumps( + self.organizations_backend.describe_policy(**self.request_params) + ) + + def attach_policy(self): + return json.dumps( + self.organizations_backend.attach_policy(**self.request_params) + ) + + def list_policies(self): + return json.dumps( + self.organizations_backend.list_policies(**self.request_params) + ) + + def list_policies_for_target(self): + return json.dumps( + self.organizations_backend.list_policies_for_target(**self.request_params) + ) + + def list_targets_for_policy(self): + return json.dumps( + self.organizations_backend.list_targets_for_policy(**self.request_params) + ) diff --git a/moto/organizations/urls.py b/moto/organizations/urls.py new file mode 100644 index 000000000..7911f5b53 --- /dev/null +++ b/moto/organizations/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses 
import OrganizationsResponse + +url_bases = [ + "https?://organizations.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': OrganizationsResponse.dispatch, +} diff --git a/moto/organizations/utils.py b/moto/organizations/utils.py new file mode 100644 index 000000000..bde3660d2 --- /dev/null +++ b/moto/organizations/utils.py @@ -0,0 +1,76 @@ +from __future__ import unicode_literals + +import random +import string + +MASTER_ACCOUNT_ID = '123456789012' +MASTER_ACCOUNT_EMAIL = 'fakeorg@moto-example.com' +ORGANIZATION_ARN_FORMAT = 'arn:aws:organizations::{0}:organization/{1}' +MASTER_ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{0}' +ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{2}' +ROOT_ARN_FORMAT = 'arn:aws:organizations::{0}:root/{1}/{2}' +OU_ARN_FORMAT = 'arn:aws:organizations::{0}:ou/{1}/{2}' +SCP_ARN_FORMAT = 'arn:aws:organizations::{0}:policy/{1}/service_control_policy/{2}' + +CHARSET = string.ascii_lowercase + string.digits +ORG_ID_SIZE = 10 +ROOT_ID_SIZE = 4 +ACCOUNT_ID_SIZE = 12 +OU_ID_SUFFIX_SIZE = 8 +CREATE_ACCOUNT_STATUS_ID_SIZE = 8 +SCP_ID_SIZE = 8 + +EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" +ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % ORG_ID_SIZE +ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % ROOT_ID_SIZE +OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (ROOT_ID_SIZE, OU_ID_SUFFIX_SIZE) +ACCOUNT_ID_REGEX = r'[0-9]{%s}' % ACCOUNT_ID_SIZE +CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % CREATE_ACCOUNT_STATUS_ID_SIZE +SCP_ID_REGEX = r'p-[a-z0-9]{%s}' % SCP_ID_SIZE + + +def make_random_org_id(): + # The regex pattern for an organization ID string requires "o-" + # followed by from 10 to 32 lower-case letters or digits. + # e.g. 'o-vipjnq5z86' + return 'o-' + ''.join(random.choice(CHARSET) for x in range(ORG_ID_SIZE)) + + +def make_random_root_id(): + # The regex pattern for a root ID string requires "r-" followed by + # from 4 to 32 lower-case letters or digits. + # e.g. 'r-3zwx' + return 'r-' + ''.join(random.choice(CHARSET) for x in range(ROOT_ID_SIZE)) + + +def make_random_ou_id(root_id): + # The regex pattern for an organizational unit ID string requires "ou-" + # followed by from 4 to 32 lower-case letters or digits (the ID of the root + # that contains the OU) followed by a second "-" dash and from 8 to 32 + # additional lower-case letters or digits. + # e.g. ou-g8sd-5oe3bjaw + return '-'.join([ + 'ou', + root_id.partition('-')[2], + ''.join(random.choice(CHARSET) for x in range(OU_ID_SUFFIX_SIZE)), + ]) + + +def make_random_account_id(): + # The regex pattern for an account ID string requires exactly 12 digits. + # e.g. '488633172133' + return ''.join([random.choice(string.digits) for n in range(ACCOUNT_ID_SIZE)]) + + +def make_random_create_account_status_id(): + # The regex pattern for an create account request ID string requires + # "car-" followed by from 8 to 32 lower-case letters or digits. + # e.g. 'car-35gxzwrp' + return 'car-' + ''.join(random.choice(CHARSET) for x in range(CREATE_ACCOUNT_STATUS_ID_SIZE)) + + +def make_random_service_control_policy_id(): + # The regex pattern for a policy ID string requires "p-" followed by + # from 8 to 128 lower-case letters or digits. + # e.g. 
'p-k2av4a8a' + return 'p-' + ''.join(random.choice(CHARSET) for x in range(SCP_ID_SIZE)) diff --git a/moto/packages/httpretty/core.py b/moto/packages/httpretty/core.py index e0f3a7e69..f94723017 100644 --- a/moto/packages/httpretty/core.py +++ b/moto/packages/httpretty/core.py @@ -85,6 +85,7 @@ old_socksocket = None old_ssl_wrap_socket = None old_sslwrap_simple = None old_sslsocket = None +old_sslcontext_wrap_socket = None if PY3: # pragma: no cover basestring = (bytes, str) @@ -100,6 +101,10 @@ try: # pragma: no cover if not PY3: old_sslwrap_simple = ssl.sslwrap_simple old_sslsocket = ssl.SSLSocket + try: + old_sslcontext_wrap_socket = ssl.SSLContext.wrap_socket + except AttributeError: + pass except ImportError: # pragma: no cover ssl = None @@ -263,10 +268,26 @@ class fakesock(object): _sent_data = [] def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, - protocol=0): - self.truesock = (old_socket(family, type, protocol) - if httpretty.allow_net_connect - else None) + proto=0, fileno=None, _sock=None): + """ + Matches both the Python 2 API: + def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None): + https://github.com/python/cpython/blob/2.7/Lib/socket.py + + and the Python 3 API: + def __init__(self, family=-1, type=-1, proto=-1, fileno=None): + https://github.com/python/cpython/blob/3.5/Lib/socket.py + """ + if httpretty.allow_net_connect: + if PY3: + self.truesock = old_socket(family, type, proto, fileno) + else: + # If Python 2, if parameters are passed as arguments, instead of kwargs, + # the 4th argument `_sock` will be interpreted as the `fileno`. + # Check if _sock is none, and if so, pass fileno. + self.truesock = old_socket(family, type, proto, fileno or _sock) + else: + self.truesock = None self._closed = True self.fd = FakeSockFile() self.fd.socket = self @@ -281,7 +302,7 @@ class fakesock(object): return { 'notAfter': shift.strftime('%b %d %H:%M:%S GMT'), 'subjectAltName': ( - ('DNS', '*%s' % self._host), + ('DNS', '*.%s' % self._host), ('DNS', self._host), ('DNS', '*'), ), @@ -772,7 +793,7 @@ class URIMatcher(object): def __init__(self, uri, entries, match_querystring=False): self._match_querystring = match_querystring - if type(uri).__name__ == 'SRE_Pattern': + if type(uri).__name__ in ('SRE_Pattern', 'Pattern'): self.regex = uri result = urlsplit(uri.pattern) if result.scheme == 'https': @@ -1012,6 +1033,10 @@ class httpretty(HttpBaseClass): if ssl: ssl.wrap_socket = old_ssl_wrap_socket ssl.SSLSocket = old_sslsocket + try: + ssl.SSLContext.wrap_socket = old_sslcontext_wrap_socket + except AttributeError: + pass ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket ssl.__dict__['SSLSocket'] = old_sslsocket @@ -1058,6 +1083,14 @@ class httpretty(HttpBaseClass): ssl.wrap_socket = fake_wrap_socket ssl.SSLSocket = FakeSSLSocket + try: + def fake_sslcontext_wrap_socket(cls, *args, **kwargs): + return fake_wrap_socket(*args, **kwargs) + + ssl.SSLContext.wrap_socket = fake_sslcontext_wrap_socket + except AttributeError: + pass + ssl.__dict__['wrap_socket'] = fake_wrap_socket ssl.__dict__['SSLSocket'] = FakeSSLSocket @@ -1096,4 +1129,4 @@ def httprettified(test): if isinstance(test, ClassTypes): return decorate_class(test) - return decorate_callable(test) + return decorate_callable(test) \ No newline at end of file diff --git a/moto/packages/httpretty/http.py b/moto/packages/httpretty/http.py index 7e9a56885..ee1625905 100644 --- a/moto/packages/httpretty/http.py +++ b/moto/packages/httpretty/http.py @@ -29,7 +29,6 @@ import re from .compat import 
BaseClass from .utils import decode_utf8 - STATUSES = { 100: "Continue", 101: "Switching Protocols", diff --git a/moto/packages/responses/.gitignore b/moto/packages/responses/.gitignore deleted file mode 100644 index 5d4406b8d..000000000 --- a/moto/packages/responses/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -.arcconfig -.coverage -.DS_Store -.idea -*.db -*.egg-info -*.pyc -/htmlcov -/dist -/build -/.cache -/.tox diff --git a/moto/packages/responses/.travis.yml b/moto/packages/responses/.travis.yml deleted file mode 100644 index 9ab219db0..000000000 --- a/moto/packages/responses/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -language: python -sudo: false -python: - - "2.6" - - "2.7" - - "3.3" - - "3.4" - - "3.5" -cache: - directories: - - .pip_download_cache -env: - matrix: - - REQUESTS=requests==2.0 - - REQUESTS=-U requests - - REQUESTS="-e git+git://github.com/kennethreitz/requests.git#egg=requests" - global: - - PIP_DOWNLOAD_CACHE=".pip_download_cache" -matrix: - allow_failures: - - env: 'REQUESTS="-e git+git://github.com/kennethreitz/requests.git#egg=requests"' -install: - - "pip install ${REQUESTS}" - - make develop -script: - - if [[ $TRAVIS_PYTHON_VERSION != 2.6 ]]; then make lint; fi - - py.test . --cov responses --cov-report term-missing diff --git a/moto/packages/responses/CHANGES b/moto/packages/responses/CHANGES deleted file mode 100644 index 1bfd7ead8..000000000 --- a/moto/packages/responses/CHANGES +++ /dev/null @@ -1,32 +0,0 @@ -Unreleased ----------- - -- Allow empty list/dict as json object (GH-100) - -0.5.1 ------ - -- Add LICENSE, README and CHANGES to the PyPI distribution (GH-97). - -0.5.0 ------ - -- Allow passing a JSON body to `response.add` (GH-82) -- Improve ConnectionError emulation (GH-73) -- Correct assertion in assert_all_requests_are_fired (GH-71) - -0.4.0 ------ - -- Requests 2.0+ is required -- Mocking now happens on the adapter instead of the session - -0.3.0 ------ - -- Add the ability to mock errors (GH-22) -- Add responses.mock context manager (GH-36) -- Support custom adapters (GH-33) -- Add support for regexp error matching (GH-25) -- Add support for dynamic bodies via `responses.add_callback` (GH-24) -- Preserve argspec when using `responses.activate` decorator (GH-18) diff --git a/moto/packages/responses/LICENSE b/moto/packages/responses/LICENSE deleted file mode 100644 index 52b44b20a..000000000 --- a/moto/packages/responses/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright 2015 David Cramer - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
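With the vendored copy of responses removed above, code that previously imported from moto.packages.responses should depend on the standalone responses package from PyPI instead. A minimal sketch of equivalent usage against the external library (hypothetical URL):

    import responses
    import requests

    @responses.activate
    def test_not_found():
        # Registered responses are served in place of real network calls.
        responses.add(responses.GET, 'http://example.com/api/thing',
                      json={'error': 'not found'}, status=404)
        assert requests.get('http://example.com/api/thing').status_code == 404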
diff --git a/moto/packages/responses/MANIFEST.in b/moto/packages/responses/MANIFEST.in deleted file mode 100644 index ef901684c..000000000 --- a/moto/packages/responses/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include README.rst CHANGES LICENSE -global-exclude *~ diff --git a/moto/packages/responses/Makefile b/moto/packages/responses/Makefile deleted file mode 100644 index 9da42c6d1..000000000 --- a/moto/packages/responses/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -develop: - pip install -e . - make install-test-requirements - -install-test-requirements: - pip install "file://`pwd`#egg=responses[tests]" - -test: develop lint - @echo "Running Python tests" - py.test . - @echo "" - -lint: - @echo "Linting Python files" - PYFLAKES_NODOCTEST=1 flake8 . - @echo "" diff --git a/moto/packages/responses/README.rst b/moto/packages/responses/README.rst deleted file mode 100644 index 5f946fcde..000000000 --- a/moto/packages/responses/README.rst +++ /dev/null @@ -1,190 +0,0 @@ -Responses -========= - -.. image:: https://travis-ci.org/getsentry/responses.svg?branch=master - :target: https://travis-ci.org/getsentry/responses - -A utility library for mocking out the `requests` Python library. - -.. note:: Responses requires Requests >= 2.0 - -Response body as string ------------------------ - -.. code-block:: python - - import responses - import requests - - @responses.activate - def test_my_api(): - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{"error": "not found"}', status=404, - content_type='application/json') - - resp = requests.get('http://twitter.com/api/1/foobar') - - assert resp.json() == {"error": "not found"} - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar' - assert responses.calls[0].response.text == '{"error": "not found"}' - -You can also specify a JSON object instead of a body string. - -.. code-block:: python - - import responses - import requests - - @responses.activate - def test_my_api(): - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - json={"error": "not found"}, status=404) - - resp = requests.get('http://twitter.com/api/1/foobar') - - assert resp.json() == {"error": "not found"} - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar' - assert responses.calls[0].response.text == '{"error": "not found"}' - -Request callback ----------------- - -.. code-block:: python - - import json - - import responses - import requests - - @responses.activate - def test_calc_api(): - - def request_callback(request): - payload = json.loads(request.body) - resp_body = {'value': sum(payload['numbers'])} - headers = {'request-id': '728d329e-0e86-11e4-a748-0c84dc037c13'} - return (200, headers, json.dumps(resp_body)) - - responses.add_callback( - responses.POST, 'http://calc.com/sum', - callback=request_callback, - content_type='application/json', - ) - - resp = requests.post( - 'http://calc.com/sum', - json.dumps({'numbers': [1, 2, 3]}), - headers={'content-type': 'application/json'}, - ) - - assert resp.json() == {'value': 6} - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://calc.com/sum' - assert responses.calls[0].response.text == '{"value": 6}' - assert ( - responses.calls[0].response.headers['request-id'] == - '728d329e-0e86-11e4-a748-0c84dc037c13' - ) - -Instead of passing a string URL into `responses.add` or `responses.add_callback` -you can also supply a compiled regular expression. - -.. 
code-block:: python - - import re - import responses - import requests - - # Instead of - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{"error": "not found"}', status=404, - content_type='application/json') - - # You can do the following - url_re = re.compile(r'https?://twitter\.com/api/\d+/foobar') - responses.add(responses.GET, url_re, - body='{"error": "not found"}', status=404, - content_type='application/json') - -A response can also throw an exception as follows. - -.. code-block:: python - - import responses - import requests - from requests.exceptions import HTTPError - - exception = HTTPError('Something went wrong') - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - body=exception) - # All calls to 'http://twitter.com/api/1/foobar' will throw exception. - - -Responses as a context manager ------------------------------- - -.. code-block:: python - - import responses - import requests - - - def test_my_api(): - with responses.RequestsMock() as rsps: - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{}', status=200, - content_type='application/json') - resp = requests.get('http://twitter.com/api/1/foobar') - - assert resp.status_code == 200 - - # outside the context manager requests will hit the remote server - resp = requests.get('http://twitter.com/api/1/foobar') - resp.status_code == 404 - - -Assertions on declared responses --------------------------------- - -When used as a context manager, Responses will, by default, raise an assertion -error if a url was registered but not accessed. This can be disabled by passing -the ``assert_all_requests_are_fired`` value: - -.. code-block:: python - - import responses - import requests - - - def test_my_api(): - with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{}', status=200, - content_type='application/json') - -Multiple Responses ------------------- -You can also use ``assert_all_requests_are_fired`` to add multiple responses for the same url: - -.. 
code-block:: python - - import responses - import requests - - - def test_my_api(): - with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps: - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', status=500) - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{}', status=200, - content_type='application/json') - - resp = requests.get('http://twitter.com/api/1/foobar') - assert resp.status_code == 500 - resp = requests.get('http://twitter.com/api/1/foobar') - assert resp.status_code == 200 diff --git a/moto/packages/responses/responses.py b/moto/packages/responses/responses.py deleted file mode 100644 index 3bc437f0b..000000000 --- a/moto/packages/responses/responses.py +++ /dev/null @@ -1,330 +0,0 @@ -from __future__ import ( - absolute_import, print_function, division, unicode_literals -) - -import inspect -import json as json_module -import re -import six - -from collections import namedtuple, Sequence, Sized -from functools import update_wrapper -from cookies import Cookies -from requests.adapters import HTTPAdapter -from requests.utils import cookiejar_from_dict -from requests.exceptions import ConnectionError -from requests.sessions import REDIRECT_STATI - -try: - from requests.packages.urllib3.response import HTTPResponse -except ImportError: - from urllib3.response import HTTPResponse - -if six.PY2: - from urlparse import urlparse, parse_qsl -else: - from urllib.parse import urlparse, parse_qsl - -if six.PY2: - try: - from six import cStringIO as BufferIO - except ImportError: - from six import StringIO as BufferIO -else: - from io import BytesIO as BufferIO - - -Call = namedtuple('Call', ['request', 'response']) - -_wrapper_template = """\ -def wrapper%(signature)s: - with responses: - return func%(funcargs)s -""" - - -def _is_string(s): - return isinstance(s, (six.string_types, six.text_type)) - - -def _is_redirect(response): - try: - # 2.0.0 <= requests <= 2.2 - return response.is_redirect - except AttributeError: - # requests > 2.2 - return ( - # use request.sessions conditional - response.status_code in REDIRECT_STATI and - 'location' in response.headers - ) - - -def get_wrapped(func, wrapper_template, evaldict): - # Preserve the argspec for the wrapped function so that testing - # tools such as pytest can continue to use their fixture injection. 
- args, a, kw, defaults = inspect.getargspec(func) - - signature = inspect.formatargspec(args, a, kw, defaults) - is_bound_method = hasattr(func, '__self__') - if is_bound_method: - args = args[1:] # Omit 'self' - callargs = inspect.formatargspec(args, a, kw, None) - - ctx = {'signature': signature, 'funcargs': callargs} - six.exec_(wrapper_template % ctx, evaldict) - - wrapper = evaldict['wrapper'] - - update_wrapper(wrapper, func) - if is_bound_method: - wrapper = wrapper.__get__(func.__self__, type(func.__self__)) - return wrapper - - -class CallList(Sequence, Sized): - - def __init__(self): - self._calls = [] - - def __iter__(self): - return iter(self._calls) - - def __len__(self): - return len(self._calls) - - def __getitem__(self, idx): - return self._calls[idx] - - def add(self, request, response): - self._calls.append(Call(request, response)) - - def reset(self): - self._calls = [] - - -def _ensure_url_default_path(url, match_querystring): - if _is_string(url) and url.count('/') == 2: - if match_querystring: - return url.replace('?', '/?', 1) - else: - return url + '/' - return url - - -class RequestsMock(object): - DELETE = 'DELETE' - GET = 'GET' - HEAD = 'HEAD' - OPTIONS = 'OPTIONS' - PATCH = 'PATCH' - POST = 'POST' - PUT = 'PUT' - - def __init__(self, assert_all_requests_are_fired=True, pass_through=True): - self._calls = CallList() - self.reset() - self.assert_all_requests_are_fired = assert_all_requests_are_fired - self.pass_through = pass_through - self.original_send = HTTPAdapter.send - - def reset(self): - self._urls = [] - self._calls.reset() - - def add(self, method, url, body='', match_querystring=False, - status=200, adding_headers=None, stream=False, - content_type='text/plain', json=None): - - # if we were passed a `json` argument, - # override the body and content_type - if json is not None: - body = json_module.dumps(json) - content_type = 'application/json' - - # ensure the url has a default path set if the url is a string - url = _ensure_url_default_path(url, match_querystring) - - # body must be bytes - if isinstance(body, six.text_type): - body = body.encode('utf-8') - - self._urls.append({ - 'url': url, - 'method': method, - 'body': body, - 'content_type': content_type, - 'match_querystring': match_querystring, - 'status': status, - 'adding_headers': adding_headers, - 'stream': stream, - }) - - def add_callback(self, method, url, callback, match_querystring=False, - content_type='text/plain'): - # ensure the url has a default path set if the url is a string - # url = _ensure_url_default_path(url, match_querystring) - - self._urls.append({ - 'url': url, - 'method': method, - 'callback': callback, - 'content_type': content_type, - 'match_querystring': match_querystring, - }) - - @property - def calls(self): - return self._calls - - def __enter__(self): - self.start() - return self - - def __exit__(self, type, value, traceback): - success = type is None - self.stop(allow_assert=success) - self.reset() - return success - - def activate(self, func): - evaldict = {'responses': self, 'func': func} - return get_wrapped(func, _wrapper_template, evaldict) - - def _find_match(self, request): - for match in self._urls: - if request.method != match['method']: - continue - - if not self._has_url_match(match, request.url): - continue - - break - else: - return None - if self.assert_all_requests_are_fired: - # for each found match remove the url from the stack - self._urls.remove(match) - return match - - def _has_url_match(self, match, request_url): - url = match['url'] - - 
if not match['match_querystring']: - request_url = request_url.split('?', 1)[0] - - if _is_string(url): - if match['match_querystring']: - return self._has_strict_url_match(url, request_url) - else: - return url == request_url - elif isinstance(url, re._pattern_type) and url.match(request_url): - return True - else: - return False - - def _has_strict_url_match(self, url, other): - url_parsed = urlparse(url) - other_parsed = urlparse(other) - - if url_parsed[:3] != other_parsed[:3]: - return False - - url_qsl = sorted(parse_qsl(url_parsed.query)) - other_qsl = sorted(parse_qsl(other_parsed.query)) - return url_qsl == other_qsl - - def _on_request(self, adapter, request, **kwargs): - match = self._find_match(request) - # TODO(dcramer): find the correct class for this - if match is None: - if self.pass_through: - return self.original_send(adapter, request, **kwargs) - - error_msg = 'Connection refused: {0} {1}'.format(request.method, - request.url) - response = ConnectionError(error_msg) - response.request = request - - self._calls.add(request, response) - raise response - - if 'body' in match and isinstance(match['body'], Exception): - self._calls.add(request, match['body']) - raise match['body'] - - headers = {} - if match['content_type'] is not None: - headers['Content-Type'] = match['content_type'] - - if 'callback' in match: # use callback - status, r_headers, body = match['callback'](request) - if isinstance(body, six.text_type): - body = body.encode('utf-8') - body = BufferIO(body) - headers.update(r_headers) - - elif 'body' in match: - if match['adding_headers']: - headers.update(match['adding_headers']) - status = match['status'] - body = BufferIO(match['body']) - - response = HTTPResponse( - status=status, - reason=six.moves.http_client.responses[status], - body=body, - headers=headers, - preload_content=False, - # Need to not decode_content to mimic requests - decode_content=False, - ) - - response = adapter.build_response(request, response) - if not match.get('stream'): - response.content # NOQA - - try: - resp_cookies = Cookies.from_request(response.headers['set-cookie']) - response.cookies = cookiejar_from_dict(dict( - (v.name, v.value) - for _, v - in resp_cookies.items() - )) - except (KeyError, TypeError): - pass - - self._calls.add(request, response) - - return response - - def start(self): - try: - from unittest import mock - except ImportError: - import mock - - def unbound_on_send(adapter, request, *a, **kwargs): - return self._on_request(adapter, request, *a, **kwargs) - self._patcher1 = mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send', - unbound_on_send) - self._patcher1.start() - self._patcher2 = mock.patch('requests.adapters.HTTPAdapter.send', - unbound_on_send) - self._patcher2.start() - - def stop(self, allow_assert=True): - self._patcher1.stop() - self._patcher2.stop() - if allow_assert and self.assert_all_requests_are_fired and self._urls: - raise AssertionError( - 'Not all requests have been executed {0!r}'.format( - [(url['method'], url['url']) for url in self._urls])) - - -# expose default mock namespace -mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False, pass_through=False) -__all__ = [] -for __attr in (a for a in dir(_default_mock) if not a.startswith('_')): - __all__.append(__attr) - globals()[__attr] = getattr(_default_mock, __attr) diff --git a/moto/packages/responses/setup.cfg b/moto/packages/responses/setup.cfg deleted file mode 100644 index 9b6594f2e..000000000 --- a/moto/packages/responses/setup.cfg +++ /dev/null 
@@ -1,5 +0,0 @@ -[pytest] -addopts=--tb=short - -[bdist_wheel] -universal=1 diff --git a/moto/packages/responses/setup.py b/moto/packages/responses/setup.py deleted file mode 100644 index 911c07da4..000000000 --- a/moto/packages/responses/setup.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python -""" -responses -========= - -A utility library for mocking out the `requests` Python library. - -:copyright: (c) 2015 David Cramer -:license: Apache 2.0 -""" - -import sys -import logging - -from setuptools import setup -from setuptools.command.test import test as TestCommand -import pkg_resources - - -setup_requires = [] - -if 'test' in sys.argv: - setup_requires.append('pytest') - -install_requires = [ - 'requests>=2.0', - 'cookies', - 'six', -] - -tests_require = [ - 'pytest', - 'coverage >= 3.7.1, < 5.0.0', - 'pytest-cov', - 'flake8', -] - - -extras_require = { - ':python_version in "2.6, 2.7, 3.2"': ['mock'], - 'tests': tests_require, -} - -try: - if 'bdist_wheel' not in sys.argv: - for key, value in extras_require.items(): - if key.startswith(':') and pkg_resources.evaluate_marker(key[1:]): - install_requires.extend(value) -except Exception: - logging.getLogger(__name__).exception( - 'Something went wrong calculating platform specific dependencies, so ' - "you're getting them all!" - ) - for key, value in extras_require.items(): - if key.startswith(':'): - install_requires.extend(value) - - -class PyTest(TestCommand): - - def finalize_options(self): - TestCommand.finalize_options(self) - self.test_args = ['test_responses.py'] - self.test_suite = True - - def run_tests(self): - # import here, cause outside the eggs aren't loaded - import pytest - errno = pytest.main(self.test_args) - sys.exit(errno) - - -setup( - name='responses', - version='0.6.0', - author='David Cramer', - description=( - 'A utility library for mocking out the `requests` Python library.' 
- ), - url='https://github.com/getsentry/responses', - license='Apache 2.0', - long_description=open('README.rst').read(), - py_modules=['responses', 'test_responses'], - zip_safe=False, - install_requires=install_requires, - extras_require=extras_require, - tests_require=tests_require, - setup_requires=setup_requires, - cmdclass={'test': PyTest}, - include_package_data=True, - classifiers=[ - 'Intended Audience :: Developers', - 'Intended Audience :: System Administrators', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 3', - 'Topic :: Software Development' - ], -) diff --git a/moto/packages/responses/test_responses.py b/moto/packages/responses/test_responses.py deleted file mode 100644 index 967a535cf..000000000 --- a/moto/packages/responses/test_responses.py +++ /dev/null @@ -1,444 +0,0 @@ -from __future__ import ( - absolute_import, print_function, division, unicode_literals -) - -import re -import requests -import responses -import pytest - -from inspect import getargspec -from requests.exceptions import ConnectionError, HTTPError - - -def assert_reset(): - assert len(responses._default_mock._urls) == 0 - assert len(responses.calls) == 0 - - -def assert_response(resp, body=None, content_type='text/plain'): - assert resp.status_code == 200 - assert resp.reason == 'OK' - if content_type is not None: - assert resp.headers['Content-Type'] == content_type - else: - assert 'Content-Type' not in resp.headers - assert resp.text == body - - -def test_response(): - @responses.activate - def run(): - responses.add(responses.GET, 'http://example.com', body=b'test') - resp = requests.get('http://example.com') - assert_response(resp, 'test') - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://example.com/' - assert responses.calls[0].response.content == b'test' - - resp = requests.get('http://example.com?foo=bar') - assert_response(resp, 'test') - assert len(responses.calls) == 2 - assert responses.calls[1].request.url == 'http://example.com/?foo=bar' - assert responses.calls[1].response.content == b'test' - - run() - assert_reset() - - -def test_connection_error(): - @responses.activate - def run(): - responses.add(responses.GET, 'http://example.com') - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo') - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://example.com/foo' - assert type(responses.calls[0].response) is ConnectionError - assert responses.calls[0].response.request - - run() - assert_reset() - - -def test_match_querystring(): - @responses.activate - def run(): - url = 'http://example.com?test=1&foo=bar' - responses.add( - responses.GET, url, - match_querystring=True, body=b'test') - resp = requests.get('http://example.com?test=1&foo=bar') - assert_response(resp, 'test') - resp = requests.get('http://example.com?foo=bar&test=1') - assert_response(resp, 'test') - - run() - assert_reset() - - -def test_match_querystring_error(): - @responses.activate - def run(): - responses.add( - responses.GET, 'http://example.com/?test=1', - match_querystring=True) - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo/?test=2') - - run() - assert_reset() - - -def test_match_querystring_regex(): - @responses.activate - def run(): - """Note that `match_querystring` value shouldn't matter when passing a - regular expression""" - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=1'), - 
body='test1', match_querystring=True) - - resp = requests.get('http://example.com/foo/?test=1') - assert_response(resp, 'test1') - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=2'), - body='test2', match_querystring=False) - - resp = requests.get('http://example.com/foo/?test=2') - assert_response(resp, 'test2') - - run() - assert_reset() - - -def test_match_querystring_error_regex(): - @responses.activate - def run(): - """Note that `match_querystring` value shouldn't matter when passing a - regular expression""" - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=1'), - match_querystring=True) - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo/?test=3') - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=2'), - match_querystring=False) - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo/?test=4') - - run() - assert_reset() - - -def test_accept_string_body(): - @responses.activate - def run(): - url = 'http://example.com/' - responses.add( - responses.GET, url, body='test') - resp = requests.get(url) - assert_response(resp, 'test') - - run() - assert_reset() - - -def test_accept_json_body(): - @responses.activate - def run(): - content_type = 'application/json' - - url = 'http://example.com/' - responses.add( - responses.GET, url, json={"message": "success"}) - resp = requests.get(url) - assert_response(resp, '{"message": "success"}', content_type) - - url = 'http://example.com/1/' - responses.add(responses.GET, url, json=[]) - resp = requests.get(url) - assert_response(resp, '[]', content_type) - - run() - assert_reset() - - -def test_no_content_type(): - @responses.activate - def run(): - url = 'http://example.com/' - responses.add( - responses.GET, url, body='test', content_type=None) - resp = requests.get(url) - assert_response(resp, 'test', content_type=None) - - run() - assert_reset() - - -def test_throw_connection_error_explicit(): - @responses.activate - def run(): - url = 'http://example.com' - exception = HTTPError('HTTP Error') - responses.add( - responses.GET, url, exception) - - with pytest.raises(HTTPError) as HE: - requests.get(url) - - assert str(HE.value) == 'HTTP Error' - - run() - assert_reset() - - -def test_callback(): - body = b'test callback' - status = 400 - reason = 'Bad Request' - headers = {'foo': 'bar'} - url = 'http://example.com/' - - def request_callback(request): - return (status, headers, body) - - @responses.activate - def run(): - responses.add_callback(responses.GET, url, request_callback) - resp = requests.get(url) - assert resp.text == "test callback" - assert resp.status_code == status - assert resp.reason == reason - assert 'foo' in resp.headers - assert resp.headers['foo'] == 'bar' - - run() - assert_reset() - - -def test_callback_no_content_type(): - body = b'test callback' - status = 400 - reason = 'Bad Request' - headers = {'foo': 'bar'} - url = 'http://example.com/' - - def request_callback(request): - return (status, headers, body) - - @responses.activate - def run(): - responses.add_callback( - responses.GET, url, request_callback, content_type=None) - resp = requests.get(url) - assert resp.text == "test callback" - assert resp.status_code == status - assert resp.reason == reason - assert 'foo' in resp.headers - assert 'Content-Type' not in resp.headers - - run() - assert_reset() - - -def test_regular_expression_url(): - @responses.activate - def run(): - url = 
re.compile(r'https?://(.*\.)?example.com') - responses.add(responses.GET, url, body=b'test') - - resp = requests.get('http://example.com') - assert_response(resp, 'test') - - resp = requests.get('https://example.com') - assert_response(resp, 'test') - - resp = requests.get('https://uk.example.com') - assert_response(resp, 'test') - - with pytest.raises(ConnectionError): - requests.get('https://uk.exaaample.com') - - run() - assert_reset() - - -def test_custom_adapter(): - @responses.activate - def run(): - url = "http://example.com" - responses.add(responses.GET, url, body=b'test') - - calls = [0] - - class DummyAdapter(requests.adapters.HTTPAdapter): - - def send(self, *a, **k): - calls[0] += 1 - return super(DummyAdapter, self).send(*a, **k) - - # Test that the adapter is actually used - session = requests.Session() - session.mount("http://", DummyAdapter()) - - resp = session.get(url, allow_redirects=False) - assert calls[0] == 1 - - # Test that the response is still correctly emulated - session = requests.Session() - session.mount("http://", DummyAdapter()) - - resp = session.get(url) - assert_response(resp, 'test') - - run() - - -def test_responses_as_context_manager(): - def run(): - with responses.mock: - responses.add(responses.GET, 'http://example.com', body=b'test') - resp = requests.get('http://example.com') - assert_response(resp, 'test') - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://example.com/' - assert responses.calls[0].response.content == b'test' - - resp = requests.get('http://example.com?foo=bar') - assert_response(resp, 'test') - assert len(responses.calls) == 2 - assert (responses.calls[1].request.url == - 'http://example.com/?foo=bar') - assert responses.calls[1].response.content == b'test' - - run() - assert_reset() - - -def test_activate_doesnt_change_signature(): - def test_function(a, b=None): - return (a, b) - - decorated_test_function = responses.activate(test_function) - assert getargspec(test_function) == getargspec(decorated_test_function) - assert decorated_test_function(1, 2) == test_function(1, 2) - assert decorated_test_function(3) == test_function(3) - - -def test_activate_doesnt_change_signature_for_method(): - class TestCase(object): - - def test_function(self, a, b=None): - return (self, a, b) - - test_case = TestCase() - argspec = getargspec(test_case.test_function) - decorated_test_function = responses.activate(test_case.test_function) - assert argspec == getargspec(decorated_test_function) - assert decorated_test_function(1, 2) == test_case.test_function(1, 2) - assert decorated_test_function(3) == test_case.test_function(3) - - -def test_response_cookies(): - body = b'test callback' - status = 200 - headers = {'set-cookie': 'session_id=12345; a=b; c=d'} - url = 'http://example.com/' - - def request_callback(request): - return (status, headers, body) - - @responses.activate - def run(): - responses.add_callback(responses.GET, url, request_callback) - resp = requests.get(url) - assert resp.text == "test callback" - assert resp.status_code == status - assert 'session_id' in resp.cookies - assert resp.cookies['session_id'] == '12345' - assert resp.cookies['a'] == 'b' - assert resp.cookies['c'] == 'd' - run() - assert_reset() - - -def test_assert_all_requests_are_fired(): - def run(): - with pytest.raises(AssertionError) as excinfo: - with responses.RequestsMock( - assert_all_requests_are_fired=True) as m: - m.add(responses.GET, 'http://example.com', body=b'test') - assert 'http://example.com' in 
str(excinfo.value) - assert responses.GET in str(excinfo) - - # check that assert_all_requests_are_fired default to True - with pytest.raises(AssertionError): - with responses.RequestsMock() as m: - m.add(responses.GET, 'http://example.com', body=b'test') - - # check that assert_all_requests_are_fired doesn't swallow exceptions - with pytest.raises(ValueError): - with responses.RequestsMock() as m: - m.add(responses.GET, 'http://example.com', body=b'test') - raise ValueError() - - run() - assert_reset() - - -def test_allow_redirects_samehost(): - redirecting_url = 'http://example.com' - final_url_path = '/1' - final_url = '{0}{1}'.format(redirecting_url, final_url_path) - url_re = re.compile(r'^http://example.com(/)?(\d+)?$') - - def request_callback(request): - # endpoint of chained redirect - if request.url.endswith(final_url_path): - return 200, (), b'test' - # otherwise redirect to an integer path - else: - if request.url.endswith('/0'): - n = 1 - else: - n = 0 - redirect_headers = {'location': '/{0!s}'.format(n)} - return 301, redirect_headers, None - - def run(): - # setup redirect - with responses.mock: - responses.add_callback(responses.GET, url_re, request_callback) - resp_no_redirects = requests.get(redirecting_url, - allow_redirects=False) - assert resp_no_redirects.status_code == 301 - assert len(responses.calls) == 1 # 1x300 - assert responses.calls[0][1].status_code == 301 - assert_reset() - - with responses.mock: - responses.add_callback(responses.GET, url_re, request_callback) - resp_yes_redirects = requests.get(redirecting_url, - allow_redirects=True) - assert len(responses.calls) == 3 # 2x300 + 1x200 - assert len(resp_yes_redirects.history) == 2 - assert resp_yes_redirects.status_code == 200 - assert final_url == resp_yes_redirects.url - status_codes = [call[1].status_code for call in responses.calls] - assert status_codes == [301, 301, 200] - assert_reset() - - run() - assert_reset() diff --git a/moto/packages/responses/tox.ini b/moto/packages/responses/tox.ini deleted file mode 100644 index 0a31c03ab..000000000 --- a/moto/packages/responses/tox.ini +++ /dev/null @@ -1,11 +0,0 @@ - -[tox] -envlist = {py26,py27,py32,py33,py34,py35} - -[testenv] -deps = - pytest - pytest-cov - pytest-flakes -commands = - py.test . 
--cov responses --cov-report term-missing --flakes diff --git a/moto/rds/models.py b/moto/rds/models.py index 77deff09d..feecefe0c 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -48,6 +48,10 @@ class Database(BaseModel): if self.publicly_accessible is None: self.publicly_accessible = True + self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot") + if self.copy_tags_to_snapshot is None: + self.copy_tags_to_snapshot = False + self.backup_retention_period = kwargs.get("backup_retention_period") if self.backup_retention_period is None: self.backup_retention_period = 1 @@ -137,6 +141,7 @@ class Database(BaseModel): "multi_az": properties.get("MultiAZ"), "port": properties.get('Port', 3306), "publicly_accessible": properties.get("PubliclyAccessible"), + "copy_tags_to_snapshot": properties.get("CopyTagsToSnapshot"), "region": region_name, "security_groups": security_groups, "storage_encrypted": properties.get("StorageEncrypted"), @@ -217,6 +222,7 @@ class Database(BaseModel): {% endif %} {{ database.publicly_accessible }} + {{ database.copy_tags_to_snapshot }} {{ database.auto_minor_version_upgrade }} {{ database.allocated_storage }} {{ database.storage_encrypted }} diff --git a/moto/rds/responses.py b/moto/rds/responses.py index 0895a8bf2..0afb03979 100644 --- a/moto/rds/responses.py +++ b/moto/rds/responses.py @@ -95,7 +95,7 @@ class RDSResponse(BaseResponse): start = all_ids.index(marker) + 1 else: start = 0 - page_size = self._get_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier + page_size = self._get_int_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier instances_resp = all_instances[start:start + page_size] next_marker = None if len(all_instances) > start + page_size: @@ -107,6 +107,9 @@ class RDSResponse(BaseResponse): def modify_db_instance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') db_kwargs = self._get_db_kwargs() + new_db_instance_identifier = self._get_param('NewDBInstanceIdentifier') + if new_db_instance_identifier: + db_kwargs['new_db_instance_identifier'] = new_db_instance_identifier database = self.backend.modify_database( db_instance_identifier, db_kwargs) template = self.response_template(MODIFY_DATABASE_TEMPLATE) diff --git a/moto/rds2/exceptions.py b/moto/rds2/exceptions.py index 0e716310e..e82ae7077 100644 --- a/moto/rds2/exceptions.py +++ b/moto/rds2/exceptions.py @@ -60,6 +60,15 @@ class DBParameterGroupNotFoundError(RDSClientError): 'DB Parameter Group {0} not found.'.format(db_parameter_group_name)) +class OptionGroupNotFoundFaultError(RDSClientError): + + def __init__(self, option_group_name): + super(OptionGroupNotFoundFaultError, self).__init__( + 'OptionGroupNotFoundFault', + 'Specified OptionGroupName: {0} not found.'.format(option_group_name) + ) + + class InvalidDBClusterStateFaultError(RDSClientError): def __init__(self, database_identifier): diff --git a/moto/rds2/models.py b/moto/rds2/models.py index cf83733ce..4c0daa230 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -20,6 +20,7 @@ from .exceptions import (RDSClientError, DBSecurityGroupNotFoundError, DBSubnetGroupNotFoundError, DBParameterGroupNotFoundError, + OptionGroupNotFoundFaultError, InvalidDBClusterStateFaultError, InvalidDBInstanceStateError, SnapshotQuotaExceededError, @@ -70,9 +71,13 @@ class Database(BaseModel): self.port = Database.default_port(self.engine) self.db_instance_identifier = kwargs.get('db_instance_identifier') self.db_name = kwargs.get("db_name") + 
self.instance_create_time = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) self.publicly_accessible = kwargs.get("publicly_accessible") if self.publicly_accessible is None: self.publicly_accessible = True + self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot") + if self.copy_tags_to_snapshot is None: + self.copy_tags_to_snapshot = False self.backup_retention_period = kwargs.get("backup_retention_period") if self.backup_retention_period is None: self.backup_retention_period = 1 @@ -96,6 +101,8 @@ class Database(BaseModel): 'preferred_backup_window', '13:14-13:44') self.license_model = kwargs.get('license_model', 'general-public-license') self.option_group_name = kwargs.get('option_group_name', None) + if self.option_group_name and self.option_group_name not in rds2_backends[self.region].option_groups: + raise OptionGroupNotFoundFaultError(self.option_group_name) self.default_option_groups = {"MySQL": "default.mysql5.6", "mysql": "default.mysql5.6", "postgres": "default.postgres9.3" @@ -103,6 +110,8 @@ class Database(BaseModel): if not self.option_group_name and self.engine in self.default_option_groups: self.option_group_name = self.default_option_groups[self.engine] self.character_set_name = kwargs.get('character_set_name', None) + self.iam_database_authentication_enabled = False + self.dbi_resource_id = "db-M5ENSHXFPU6XHZ4G4ZEI5QIO2U" self.tags = kwargs.get('tags', []) @property @@ -140,8 +149,17 @@ class Database(BaseModel): {{ database.status }} {% if database.db_name %}{{ database.db_name }}{% endif %} {{ database.multi_az }} - + + {% for vpc_security_group_id in database.vpc_security_group_ids %} + + active + {{ vpc_security_group_id }} + + {% endfor %} + {{ database.db_instance_identifier }} + {{ database.dbi_resource_id }} + {{ database.instance_create_time }} 03:50-04:20 wed:06:38-wed:07:08 @@ -163,9 +181,14 @@ class Database(BaseModel): {{ database.source_db_identifier }} {% endif %} {{ database.engine }} + {{database.iam_database_authentication_enabled }} {{ database.license_model }} {{ database.engine_version }} + + {{ database.option_group_name }} + in-sync + {% for db_parameter_group in database.db_parameter_groups() %} @@ -204,6 +227,7 @@ class Database(BaseModel): {% endif %} {{ database.publicly_accessible }} + {{ database.copy_tags_to_snapshot }} {{ database.auto_minor_version_upgrade }} {{ database.allocated_storage }} {{ database.storage_encrypted }} @@ -300,11 +324,13 @@ class Database(BaseModel): "db_parameter_group_name": properties.get('DBParameterGroupName'), "port": properties.get('Port', 3306), "publicly_accessible": properties.get("PubliclyAccessible"), + "copy_tags_to_snapshot": properties.get("CopyTagsToSnapshot"), "region": region_name, "security_groups": security_groups, "storage_encrypted": properties.get("StorageEncrypted"), "storage_type": properties.get("StorageType"), "tags": properties.get("Tags"), + "vpc_security_group_ids": properties.get('VpcSecurityGroupIds', []), } rds2_backend = rds2_backends[region_name] @@ -358,12 +384,13 @@ class Database(BaseModel): "PreferredBackupWindow": "{{ database.preferred_backup_window }}", "PreferredMaintenanceWindow": "{{ database.preferred_maintenance_window }}", "PubliclyAccessible": "{{ database.publicly_accessible }}", + "CopyTagsToSnapshot": "{{ database.copy_tags_to_snapshot }}", "AllocatedStorage": "{{ database.allocated_storage }}", "Endpoint": { "Address": "{{ database.address }}", "Port": "{{ database.port }}" }, - "InstanceCreateTime": null, + "InstanceCreateTime": "{{ 
database.instance_create_time }}", "Iops": null, "ReadReplicaDBInstanceIdentifiers": [{%- for replica in database.replicas -%} {%- if not loop.first -%},{%- endif -%} @@ -378,10 +405,12 @@ class Database(BaseModel): "SecondaryAvailabilityZone": null, "StatusInfos": null, "VpcSecurityGroups": [ + {% for vpc_security_group_id in database.vpc_security_group_ids %} { "Status": "active", - "VpcSecurityGroupId": "sg-123456" + "VpcSecurityGroupId": "{{ vpc_security_group_id }}" } + {% endfor %} ], "DBInstanceArn": "{{ database.db_instance_arn }}" }""") @@ -407,10 +436,10 @@ class Database(BaseModel): class Snapshot(BaseModel): - def __init__(self, database, snapshot_id, tags=None): + def __init__(self, database, snapshot_id, tags): self.database = database self.snapshot_id = snapshot_id - self.tags = tags or [] + self.tags = tags self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) @property @@ -452,6 +481,20 @@ class Snapshot(BaseModel): """) return template.render(snapshot=self, database=self.database) + def get_tags(self): + return self.tags + + def add_tags(self, tags): + new_keys = [tag_set['Key'] for tag_set in tags] + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in new_keys] + self.tags.extend(tags) + return self.tags + + def remove_tags(self, tag_keys): + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] + class SecurityGroup(BaseModel): @@ -687,6 +730,10 @@ class RDS2Backend(BaseBackend): raise DBSnapshotAlreadyExistsError(db_snapshot_identifier) if len(self.snapshots) >= int(os.environ.get('MOTO_RDS_SNAPSHOT_LIMIT', '100')): raise SnapshotQuotaExceededError() + if tags is None: + tags = list() + if database.copy_tags_to_snapshot and not tags: + tags = database.get_tags() snapshot = Snapshot(database, db_snapshot_identifier, tags) self.snapshots[db_snapshot_identifier] = snapshot return snapshot @@ -722,10 +769,11 @@ class RDS2Backend(BaseBackend): def describe_snapshots(self, db_instance_identifier, db_snapshot_identifier): if db_instance_identifier: + db_instance_snapshots = [] for snapshot in self.snapshots.values(): if snapshot.database.db_instance_identifier == db_instance_identifier: - return [snapshot] - raise DBSnapshotNotFoundError() + db_instance_snapshots.append(snapshot) + return db_instance_snapshots if db_snapshot_identifier: if db_snapshot_identifier in self.snapshots: @@ -736,6 +784,10 @@ class RDS2Backend(BaseBackend): def modify_database(self, db_instance_identifier, db_kwargs): database = self.describe_databases(db_instance_identifier)[0] + if 'new_db_instance_identifier' in db_kwargs: + del self.databases[db_instance_identifier] + db_instance_identifier = db_kwargs['db_instance_identifier'] = db_kwargs.pop('new_db_instance_identifier') + self.databases[db_instance_identifier] = database database.update(db_kwargs) return database @@ -753,13 +805,13 @@ class RDS2Backend(BaseBackend): raise InvalidDBInstanceStateError(db_instance_identifier, 'stop') if db_snapshot_identifier: self.create_snapshot(db_instance_identifier, db_snapshot_identifier) - database.status = 'shutdown' + database.status = 'stopped' return database def start_database(self, db_instance_identifier): database = self.describe_databases(db_instance_identifier)[0] # todo: bunch of different error messages to be generated from this api call - if database.status != 'shutdown': + if database.status != 'stopped': raise InvalidDBInstanceStateError(db_instance_identifier, 'start') database.status = 'available' return database 
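The hunk above swaps the emulated lifecycle status from 'shutdown' to the string RDS actually reports, 'stopped', and start_database now only accepts instances in that state. A minimal sketch of the round trip this enables, assuming moto's mock_rds2 decorator and boto3 are available (region and identifiers are illustrative):

.. code-block:: python

    import boto3
    from moto import mock_rds2

    @mock_rds2
    def test_stop_then_start():
        client = boto3.client('rds', region_name='us-west-2')
        client.create_db_instance(DBInstanceIdentifier='db-1',
                                  DBInstanceClass='db.m4.large',
                                  Engine='postgres')
        # Stopping should now surface the real RDS status string.
        stopped = client.stop_db_instance(DBInstanceIdentifier='db-1')
        assert stopped['DBInstance']['DBInstanceStatus'] == 'stopped'
        # Starting is only valid from the 'stopped' state.
        started = client.start_db_instance(DBInstanceIdentifier='db-1')
        assert started['DBInstance']['DBInstanceStatus'] == 'available'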
@@ -778,13 +830,13 @@ class RDS2Backend(BaseBackend): def delete_database(self, db_instance_identifier, db_snapshot_name=None): if db_instance_identifier in self.databases: + if db_snapshot_name: + self.create_snapshot(db_instance_identifier, db_snapshot_name) database = self.databases.pop(db_instance_identifier) if database.is_replica: primary = self.find_db_from_id(database.source_db_identifier) primary.remove_replica(database) database.status = 'deleting' - if db_snapshot_name: - self.snapshots[db_snapshot_name] = Snapshot(database, db_snapshot_name) return database else: raise DBInstanceNotFoundError(db_instance_identifier) @@ -840,13 +892,16 @@ class RDS2Backend(BaseBackend): def create_option_group(self, option_group_kwargs): option_group_id = option_group_kwargs['name'] - valid_option_group_engines = {'mysql': ['5.6'], - 'oracle-se1': ['11.2'], - 'oracle-se': ['11.2'], - 'oracle-ee': ['11.2'], + valid_option_group_engines = {'mariadb': ['10.0', '10.1', '10.2', '10.3'], + 'mysql': ['5.5', '5.6', '5.7', '8.0'], + 'oracle-se2': ['11.2', '12.1', '12.2'], + 'oracle-se1': ['11.2', '12.1', '12.2'], + 'oracle-se': ['11.2', '12.1', '12.2'], + 'oracle-ee': ['11.2', '12.1', '12.2'], 'sqlserver-se': ['10.50', '11.00'], - 'sqlserver-ee': ['10.50', '11.00'] - } + 'sqlserver-ee': ['10.50', '11.00'], + 'sqlserver-ex': ['10.50', '11.00'], + 'sqlserver-web': ['10.50', '11.00']} if option_group_kwargs['name'] in self.option_groups: raise RDSClientError('OptionGroupAlreadyExistsFault', 'An option group named {0} already exists.'.format(option_group_kwargs['name'])) @@ -872,8 +927,7 @@ class RDS2Backend(BaseBackend): if option_group_name in self.option_groups: return self.option_groups.pop(option_group_name) else: - raise RDSClientError( - 'OptionGroupNotFoundFault', 'Specified OptionGroupName: {0} not found.'.format(option_group_name)) + raise OptionGroupNotFoundFaultError(option_group_name) def describe_option_groups(self, option_group_kwargs): option_group_list = [] @@ -902,8 +956,7 @@ class RDS2Backend(BaseBackend): else: option_group_list.append(option_group) if not len(option_group_list): - raise RDSClientError('OptionGroupNotFoundFault', - 'Specified OptionGroupName: {0} not found.'.format(option_group_kwargs['name'])) + raise OptionGroupNotFoundFaultError(option_group_kwargs['name']) return option_group_list[marker:max_records + marker] @staticmethod @@ -932,8 +985,7 @@ class RDS2Backend(BaseBackend): def modify_option_group(self, option_group_name, options_to_include=None, options_to_remove=None, apply_immediately=None): if option_group_name not in self.option_groups: - raise RDSClientError('OptionGroupNotFoundFault', - 'Specified OptionGroupName: {0} not found.'.format(option_group_name)) + raise OptionGroupNotFoundFaultError(option_group_name) if not options_to_include and not options_to_remove: raise RDSClientError('InvalidParameterValue', 'At least one option must be added, modified, or removed.') @@ -1019,8 +1071,8 @@ class RDS2Backend(BaseBackend): if resource_name in self.security_groups: return self.security_groups[resource_name].get_tags() elif resource_type == 'snapshot': # DB Snapshot - # TODO: Complete call to tags on resource type DB Snapshot - return [] + if resource_name in self.snapshots: + return self.snapshots[resource_name].get_tags() elif resource_type == 'subgrp': # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].get_tags() @@ -1050,7 +1102,8 @@ class RDS2Backend(BaseBackend): if resource_name in self.security_groups: return 
self.security_groups[resource_name].remove_tags(tag_keys) elif resource_type == 'snapshot': # DB Snapshot - return None + if resource_name in self.snapshots: + return self.snapshots[resource_name].remove_tags(tag_keys) elif resource_type == 'subgrp': # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].remove_tags(tag_keys) @@ -1079,7 +1132,8 @@ class RDS2Backend(BaseBackend): if resource_name in self.security_groups: return self.security_groups[resource_name].add_tags(tags) elif resource_type == 'snapshot': # DB Snapshot - return [] + if resource_name in self.snapshots: + return self.snapshots[resource_name].add_tags(tags) elif resource_type == 'subgrp': # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].add_tags(tags) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index bf76660aa..7b8d0b63a 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -19,6 +19,7 @@ class RDS2Response(BaseResponse): "allocated_storage": self._get_int_param('AllocatedStorage'), "availability_zone": self._get_param("AvailabilityZone"), "backup_retention_period": self._get_param("BackupRetentionPeriod"), + "copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"), "db_instance_class": self._get_param('DBInstanceClass'), "db_instance_identifier": self._get_param('DBInstanceIdentifier'), "db_name": self._get_param("DBName"), @@ -33,7 +34,7 @@ class RDS2Response(BaseResponse): "master_user_password": self._get_param('MasterUserPassword'), "master_username": self._get_param('MasterUsername'), "multi_az": self._get_bool_param("MultiAZ"), - # OptionGroupName + "option_group_name": self._get_param("OptionGroupName"), "port": self._get_param('Port'), # PreferredBackupWindow # PreferredMaintenanceWindow @@ -42,7 +43,7 @@ class RDS2Response(BaseResponse): "security_groups": self._get_multi_param('DBSecurityGroups.DBSecurityGroupName'), "storage_encrypted": self._get_param("StorageEncrypted"), "storage_type": self._get_param("StorageType", 'standard'), - # VpcSecurityGroupIds.member.N + "vpc_security_group_ids": self._get_multi_param("VpcSecurityGroupIds.VpcSecurityGroupId"), "tags": list(), } args['tags'] = self.unpack_complex_list_params( @@ -123,7 +124,7 @@ class RDS2Response(BaseResponse): start = all_ids.index(marker) + 1 else: start = 0 - page_size = self._get_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier + page_size = self._get_int_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier instances_resp = all_instances[start:start + page_size] next_marker = None if len(all_instances) > start + page_size: @@ -135,6 +136,9 @@ class RDS2Response(BaseResponse): def modify_db_instance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') db_kwargs = self._get_db_kwargs() + new_db_instance_identifier = self._get_param('NewDBInstanceIdentifier') + if new_db_instance_identifier: + db_kwargs['new_db_instance_identifier'] = new_db_instance_identifier database = self.backend.modify_database( db_instance_identifier, db_kwargs) template = self.response_template(MODIFY_DATABASE_TEMPLATE) @@ -156,7 +160,7 @@ class RDS2Response(BaseResponse): def create_db_snapshot(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') db_snapshot_identifier = self._get_param('DBSnapshotIdentifier') - tags = self._get_param('Tags', []) + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) snapshot = 
self.backend.create_snapshot(db_instance_identifier, db_snapshot_identifier, tags) template = self.response_template(CREATE_SNAPSHOT_TEMPLATE) return template.render(snapshot=snapshot) @@ -276,7 +280,7 @@ class RDS2Response(BaseResponse): def describe_option_groups(self): kwargs = self._get_option_group_kwargs() - kwargs['max_records'] = self._get_param('MaxRecords') + kwargs['max_records'] = self._get_int_param('MaxRecords') kwargs['marker'] = self._get_param('Marker') option_groups = self.backend.describe_option_groups(kwargs) template = self.response_template(DESCRIBE_OPTION_GROUP_TEMPLATE) @@ -325,7 +329,7 @@ class RDS2Response(BaseResponse): def describe_db_parameter_groups(self): kwargs = self._get_db_parameter_group_kwargs() - kwargs['max_records'] = self._get_param('MaxRecords') + kwargs['max_records'] = self._get_int_param('MaxRecords') kwargs['marker'] = self._get_param('Marker') db_parameter_groups = self.backend.describe_db_parameter_groups(kwargs) template = self.response_template( diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index a89ed5a04..b0cef57ad 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -58,6 +58,21 @@ class InvalidSubnetError(RedshiftClientError): "Subnet {0} not found.".format(subnet_identifier)) +class SnapshotCopyGrantAlreadyExistsFaultError(RedshiftClientError): + def __init__(self, snapshot_copy_grant_name): + super(SnapshotCopyGrantAlreadyExistsFaultError, self).__init__( + 'SnapshotCopyGrantAlreadyExistsFault', + "Cannot create the snapshot copy grant because a grant " + "with the identifier '{0}' already exists".format(snapshot_copy_grant_name)) + + +class SnapshotCopyGrantNotFoundFaultError(RedshiftClientError): + def __init__(self, snapshot_copy_grant_name): + super(SnapshotCopyGrantNotFoundFaultError, self).__init__( + 'SnapshotCopyGrantNotFoundFault', + "Snapshot copy grant not found: {0}".format(snapshot_copy_grant_name)) + + class ClusterSnapshotNotFoundError(RedshiftClientError): def __init__(self, snapshot_identifier): super(ClusterSnapshotNotFoundError, self).__init__( @@ -93,3 +108,24 @@ class ResourceNotFoundFaultError(RedshiftClientError): msg = message super(ResourceNotFoundFaultError, self).__init__( 'ResourceNotFoundFault', msg) + + +class SnapshotCopyDisabledFaultError(RedshiftClientError): + def __init__(self, cluster_identifier): + super(SnapshotCopyDisabledFaultError, self).__init__( + 'SnapshotCopyDisabledFault', + "Cannot modify retention period because snapshot copy is disabled on Cluster {0}.".format(cluster_identifier)) + + +class SnapshotCopyAlreadyDisabledFaultError(RedshiftClientError): + def __init__(self, cluster_identifier): + super(SnapshotCopyAlreadyDisabledFaultError, self).__init__( + 'SnapshotCopyAlreadyDisabledFault', + "Snapshot Copy is already disabled on Cluster {0}.".format(cluster_identifier)) + + +class SnapshotCopyAlreadyEnabledFaultError(RedshiftClientError): + def __init__(self, cluster_identifier): + super(SnapshotCopyAlreadyEnabledFaultError, self).__init__( + 'SnapshotCopyAlreadyEnabledFault', + "Snapshot Copy is already enabled on Cluster {0}.".format(cluster_identifier)) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index fa642ef01..64e5c5e35 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -4,6 +4,7 @@ import copy import datetime import boto.redshift +from botocore.exceptions import ClientError from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import 
iso_8601_datetime_with_milliseconds @@ -17,7 +18,12 @@ from .exceptions import ( ClusterSubnetGroupNotFoundError, InvalidParameterValueError, InvalidSubnetError, - ResourceNotFoundFaultError + ResourceNotFoundFaultError, + SnapshotCopyAlreadyDisabledFaultError, + SnapshotCopyAlreadyEnabledFaultError, + SnapshotCopyDisabledFaultError, + SnapshotCopyGrantAlreadyExistsFaultError, + SnapshotCopyGrantNotFoundFaultError, ) @@ -67,10 +73,12 @@ class Cluster(TaggableResourceMixin, BaseModel): preferred_maintenance_window, cluster_parameter_group_name, automated_snapshot_retention_period, port, cluster_version, allow_version_upgrade, number_of_nodes, publicly_accessible, - encrypted, region_name, tags=None): + encrypted, region_name, tags=None, iam_roles_arn=None, + restored_from_snapshot=False): super(Cluster, self).__init__(region_name, tags) self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier + self.create_time = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) self.status = 'available' self.node_type = node_type self.master_username = master_username @@ -112,6 +120,9 @@ class Cluster(TaggableResourceMixin, BaseModel): else: self.number_of_nodes = 1 + self.iam_roles_arn = iam_roles_arn or [] + self.restored_from_snapshot = restored_from_snapshot + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): redshift_backend = redshift_backends[region_name] @@ -194,7 +205,7 @@ class Cluster(TaggableResourceMixin, BaseModel): return self.cluster_identifier def to_json(self): - return { + json_response = { "MasterUsername": self.master_username, "MasterUserPassword": "****", "ClusterVersion": self.cluster_version, @@ -227,8 +238,42 @@ class Cluster(TaggableResourceMixin, BaseModel): "Address": self.endpoint, "Port": self.port }, + 'ClusterCreateTime': self.create_time, "PendingModifiedValues": [], - "Tags": self.tags + "Tags": self.tags, + "IamRoles": [{ + "ApplyStatus": "in-sync", + "IamRoleArn": iam_role_arn + } for iam_role_arn in self.iam_roles_arn] + } + if self.restored_from_snapshot: + json_response['RestoreStatus'] = { + 'Status': 'completed', + 'CurrentRestoreRateInMegaBytesPerSecond': 123.0, + 'SnapshotSizeInMegaBytes': 123, + 'ProgressInMegaBytes': 123, + 'ElapsedTimeInSeconds': 123, + 'EstimatedTimeToCompletionInSeconds': 123 + } + try: + json_response['ClusterSnapshotCopyStatus'] = self.cluster_snapshot_copy_status + except AttributeError: + pass + return json_response + + +class SnapshotCopyGrant(TaggableResourceMixin, BaseModel): + + resource_type = 'snapshotcopygrant' + + def __init__(self, snapshot_copy_grant_name, kms_key_id): + self.snapshot_copy_grant_name = snapshot_copy_grant_name + self.kms_key_id = kms_key_id + + def to_json(self): + return { + "SnapshotCopyGrantName": self.snapshot_copy_grant_name, + "KmsKeyId": self.kms_key_id } @@ -351,7 +396,7 @@ class Snapshot(TaggableResourceMixin, BaseModel): resource_type = 'snapshot' - def __init__(self, cluster, snapshot_identifier, region_name, tags=None): + def __init__(self, cluster, snapshot_identifier, region_name, tags=None, iam_roles_arn=None): super(Snapshot, self).__init__(region_name, tags) self.cluster = copy.copy(cluster) self.snapshot_identifier = snapshot_identifier @@ -359,6 +404,7 @@ class Snapshot(TaggableResourceMixin, BaseModel): self.status = 'available' self.create_time = iso_8601_datetime_with_milliseconds( datetime.datetime.now()) + self.iam_roles_arn = iam_roles_arn or [] @property def resource_id(self): @@ -380,7 
+426,11 @@ class Snapshot(TaggableResourceMixin, BaseModel): 'NodeType': self.cluster.node_type, 'NumberOfNodes': self.cluster.number_of_nodes, 'DBName': self.cluster.db_name, - 'Tags': self.tags + 'Tags': self.tags, + "IamRoles": [{ + "ApplyStatus": "in-sync", + "IamRoleArn": iam_role_arn + } for iam_role_arn in self.iam_roles_arn] } @@ -410,6 +460,7 @@ class RedshiftBackend(BaseBackend): 'snapshot': self.snapshots, 'subnetgroup': self.subnet_groups } + self.snapshot_copy_grants = {} def reset(self): ec2_backend = self.ec2_backend @@ -417,6 +468,43 @@ class RedshiftBackend(BaseBackend): self.__dict__ = {} self.__init__(ec2_backend, region_name) + def enable_snapshot_copy(self, **kwargs): + cluster_identifier = kwargs['cluster_identifier'] + cluster = self.clusters[cluster_identifier] + if not hasattr(cluster, 'cluster_snapshot_copy_status'): + if cluster.encrypted == 'true' and kwargs['snapshot_copy_grant_name'] is None: + raise ClientError( + 'InvalidParameterValue', + 'SnapshotCopyGrantName is required for Snapshot Copy ' + 'on KMS encrypted clusters.' + ) + status = { + 'DestinationRegion': kwargs['destination_region'], + 'RetentionPeriod': kwargs['retention_period'], + 'SnapshotCopyGrantName': kwargs['snapshot_copy_grant_name'], + } + cluster.cluster_snapshot_copy_status = status + return cluster + else: + raise SnapshotCopyAlreadyEnabledFaultError(cluster_identifier) + + def disable_snapshot_copy(self, **kwargs): + cluster_identifier = kwargs['cluster_identifier'] + cluster = self.clusters[cluster_identifier] + if hasattr(cluster, 'cluster_snapshot_copy_status'): + del cluster.cluster_snapshot_copy_status + return cluster + else: + raise SnapshotCopyAlreadyDisabledFaultError(cluster_identifier) + + def modify_snapshot_copy_retention_period(self, cluster_identifier, retention_period): + cluster = self.clusters[cluster_identifier] + if hasattr(cluster, 'cluster_snapshot_copy_status'): + cluster.cluster_snapshot_copy_status['RetentionPeriod'] = retention_period + return cluster + else: + raise SnapshotCopyDisabledFaultError(cluster_identifier) + def create_cluster(self, **cluster_kwargs): cluster_identifier = cluster_kwargs['cluster_identifier'] cluster = Cluster(self, **cluster_kwargs) @@ -443,14 +531,37 @@ class RedshiftBackend(BaseBackend): setattr(cluster, key, value) if new_cluster_identifier: - self.delete_cluster(cluster_identifier) + dic = { + "cluster_identifier": cluster_identifier, + "skip_final_snapshot": True, + "final_cluster_snapshot_identifier": None + } + self.delete_cluster(**dic) cluster.cluster_identifier = new_cluster_identifier self.clusters[new_cluster_identifier] = cluster return cluster - def delete_cluster(self, cluster_identifier): + def delete_cluster(self, **cluster_kwargs): + cluster_identifier = cluster_kwargs.pop("cluster_identifier") + cluster_skip_final_snapshot = cluster_kwargs.pop("skip_final_snapshot") + cluster_snapshot_identifer = cluster_kwargs.pop("final_cluster_snapshot_identifier") + if cluster_identifier in self.clusters: + if cluster_skip_final_snapshot is False and cluster_snapshot_identifer is None: + raise ClientError( + "InvalidParameterValue", + 'FinalSnapshotIdentifier is required for Snapshot copy ' + 'when SkipFinalSnapshot is False' + ) + elif cluster_skip_final_snapshot is False and cluster_snapshot_identifer is not None: # create snapshot + cluster = self.describe_clusters(cluster_identifier)[0] + self.create_cluster_snapshot( + cluster_identifier, + cluster_snapshot_identifer, + cluster.region, + cluster.tags) + return 
self.clusters.pop(cluster_identifier) raise ClusterNotFoundError(cluster_identifier) @@ -529,9 +640,12 @@ class RedshiftBackend(BaseBackend): def describe_cluster_snapshots(self, cluster_identifier=None, snapshot_identifier=None): if cluster_identifier: + cluster_snapshots = [] for snapshot in self.snapshots.values(): if snapshot.cluster.cluster_identifier == cluster_identifier: - return [snapshot] + cluster_snapshots.append(snapshot) + if cluster_snapshots: + return cluster_snapshots raise ClusterNotFoundError(cluster_identifier) if snapshot_identifier: @@ -563,11 +677,37 @@ class RedshiftBackend(BaseBackend): "cluster_version": snapshot.cluster.cluster_version, "number_of_nodes": snapshot.cluster.number_of_nodes, "encrypted": snapshot.cluster.encrypted, - "tags": snapshot.cluster.tags + "tags": snapshot.cluster.tags, + "restored_from_snapshot": True } create_kwargs.update(kwargs) return self.create_cluster(**create_kwargs) + def create_snapshot_copy_grant(self, **kwargs): + snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] + kms_key_id = kwargs['kms_key_id'] + if snapshot_copy_grant_name not in self.snapshot_copy_grants: + snapshot_copy_grant = SnapshotCopyGrant(snapshot_copy_grant_name, kms_key_id) + self.snapshot_copy_grants[snapshot_copy_grant_name] = snapshot_copy_grant + return snapshot_copy_grant + raise SnapshotCopyGrantAlreadyExistsFaultError(snapshot_copy_grant_name) + + def delete_snapshot_copy_grant(self, **kwargs): + snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] + if snapshot_copy_grant_name in self.snapshot_copy_grants: + return self.snapshot_copy_grants.pop(snapshot_copy_grant_name) + raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name) + + def describe_snapshot_copy_grants(self, **kwargs): + copy_grants = self.snapshot_copy_grants.values() + snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] + if snapshot_copy_grant_name: + if snapshot_copy_grant_name in self.snapshot_copy_grants: + return [self.snapshot_copy_grants[snapshot_copy_grant_name]] + else: + raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name) + return copy_grants + def _get_resource_from_arn(self, arn): try: arn_breakdown = arn.split(':') diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index a320f9cae..a7758febb 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -99,6 +99,12 @@ class RedshiftResponse(BaseResponse): vpc_security_group_ids = self._get_multi_param('VpcSecurityGroupIds.VpcSecurityGroupId') return vpc_security_group_ids + def _get_iam_roles(self): + iam_roles = self._get_multi_param('IamRoles.member') + if not iam_roles: + iam_roles = self._get_multi_param('IamRoles.IamRoleArn') + return iam_roles + def _get_subnet_ids(self): subnet_ids = self._get_multi_param('SubnetIds.member') if not subnet_ids: @@ -127,7 +133,8 @@ class RedshiftResponse(BaseResponse): "publicly_accessible": self._get_param("PubliclyAccessible"), "encrypted": self._get_param("Encrypted"), "region_name": self.region, - "tags": self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + "tags": self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')), + "iam_roles_arn": self._get_iam_roles(), } cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json() cluster['ClusterStatus'] = 'creating' @@ -162,6 +169,7 @@ class RedshiftResponse(BaseResponse): "automated_snapshot_retention_period": self._get_int_param( 'AutomatedSnapshotRetentionPeriod'), "region_name": self.region, + "iam_roles_arn": 
self._get_iam_roles(), } cluster = self.redshift_backend.restore_from_cluster_snapshot(**restore_kwargs).to_json() cluster['ClusterStatus'] = 'creating' @@ -209,6 +217,7 @@ class RedshiftResponse(BaseResponse): "number_of_nodes": self._get_int_param('NumberOfNodes'), "publicly_accessible": self._get_param("PubliclyAccessible"), "encrypted": self._get_param("Encrypted"), + "iam_roles_arn": self._get_iam_roles(), } cluster_kwargs = {} # We only want parameters that were actually passed in, otherwise @@ -231,8 +240,13 @@ class RedshiftResponse(BaseResponse): }) def delete_cluster(self): - cluster_identifier = self._get_param("ClusterIdentifier") - cluster = self.redshift_backend.delete_cluster(cluster_identifier) + request_kwargs = { + "cluster_identifier": self._get_param("ClusterIdentifier"), + "final_cluster_snapshot_identifier": self._get_param("FinalClusterSnapshotIdentifier"), + "skip_final_snapshot": self._get_bool_param("SkipFinalClusterSnapshot") + } + + cluster = self.redshift_backend.delete_cluster(**request_kwargs) return self.get_response({ "DeleteClusterResponse": { @@ -457,6 +471,55 @@ class RedshiftResponse(BaseResponse): } }) + def create_snapshot_copy_grant(self): + copy_grant_kwargs = { + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + 'kms_key_id': self._get_param('KmsKeyId'), + 'region_name': self._get_param('Region'), + } + + copy_grant = self.redshift_backend.create_snapshot_copy_grant(**copy_grant_kwargs) + return self.get_response({ + "CreateSnapshotCopyGrantResponse": { + "CreateSnapshotCopyGrantResult": { + "SnapshotCopyGrant": copy_grant.to_json() + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def delete_snapshot_copy_grant(self): + copy_grant_kwargs = { + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + } + self.redshift_backend.delete_snapshot_copy_grant(**copy_grant_kwargs) + return self.get_response({ + "DeleteSnapshotCopyGrantResponse": { + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def describe_snapshot_copy_grants(self): + copy_grant_kwargs = { + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + } + + copy_grants = self.redshift_backend.describe_snapshot_copy_grants(**copy_grant_kwargs) + return self.get_response({ + "DescribeSnapshotCopyGrantsResponse": { + "DescribeSnapshotCopyGrantsResult": { + "SnapshotCopyGrants": [copy_grant.to_json() for copy_grant in copy_grants] + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + def create_tags(self): resource_name = self._get_param('ResourceName') tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) @@ -501,3 +564,58 @@ class RedshiftResponse(BaseResponse): } } }) + + def enable_snapshot_copy(self): + snapshot_copy_kwargs = { + 'cluster_identifier': self._get_param('ClusterIdentifier'), + 'destination_region': self._get_param('DestinationRegion'), + 'retention_period': self._get_param('RetentionPeriod', 7), + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + } + cluster = self.redshift_backend.enable_snapshot_copy(**snapshot_copy_kwargs) + + return self.get_response({ + "EnableSnapshotCopyResponse": { + "EnableSnapshotCopyResult": { + "Cluster": cluster.to_json() + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def disable_snapshot_copy(self): + snapshot_copy_kwargs = { + 'cluster_identifier': 
self._get_param('ClusterIdentifier'), + } + cluster = self.redshift_backend.disable_snapshot_copy(**snapshot_copy_kwargs) + + return self.get_response({ + "DisableSnapshotCopyResponse": { + "DisableSnapshotCopyResult": { + "Cluster": cluster.to_json() + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def modify_snapshot_copy_retention_period(self): + snapshot_copy_kwargs = { + 'cluster_identifier': self._get_param('ClusterIdentifier'), + 'retention_period': self._get_param('RetentionPeriod'), + } + cluster = self.redshift_backend.modify_snapshot_copy_retention_period(**snapshot_copy_kwargs) + + return self.get_response({ + "ModifySnapshotCopyRetentionPeriodResponse": { + "ModifySnapshotCopyRetentionPeriodResult": { + "Clusters": [cluster.to_json()] + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) diff --git a/moto/resourcegroups/__init__.py b/moto/resourcegroups/__init__.py new file mode 100644 index 000000000..74b0eb598 --- /dev/null +++ b/moto/resourcegroups/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import resourcegroups_backends +from ..core.models import base_decorator + +resourcegroups_backend = resourcegroups_backends['us-east-1'] +mock_resourcegroups = base_decorator(resourcegroups_backends) diff --git a/moto/resourcegroups/exceptions.py b/moto/resourcegroups/exceptions.py new file mode 100644 index 000000000..a8e542979 --- /dev/null +++ b/moto/resourcegroups/exceptions.py @@ -0,0 +1,13 @@ +from __future__ import unicode_literals +import json + +from werkzeug.exceptions import HTTPException + + +class BadRequestException(HTTPException): + code = 400 + + def __init__(self, message, **kwargs): + super(BadRequestException, self).__init__( + description=json.dumps({"Message": message, "Code": "BadRequestException"}), **kwargs + ) diff --git a/moto/resourcegroups/models.py b/moto/resourcegroups/models.py new file mode 100644 index 000000000..6734bd48a --- /dev/null +++ b/moto/resourcegroups/models.py @@ -0,0 +1,338 @@ +from __future__ import unicode_literals +from builtins import str + +import boto3 +import json +import re + +from moto.core import BaseBackend, BaseModel +from .exceptions import BadRequestException + + +class FakeResourceGroup(BaseModel): + def __init__(self, name, resource_query, description=None, tags=None): + self.errors = [] + description = description or "" + tags = tags or {} + if self._validate_description(value=description): + self._description = description + if self._validate_name(value=name): + self._name = name + if self._validate_resource_query(value=resource_query): + self._resource_query = resource_query + if self._validate_tags(value=tags): + self._tags = tags + self._raise_errors() + self.arn = "arn:aws:resource-groups:us-west-1:123456789012:{name}".format(name=name) + + @staticmethod + def _format_error(key, value, constraint): + return "Value '{value}' at '{key}' failed to satisfy constraint: {constraint}".format( + constraint=constraint, + key=key, + value=value, + ) + + def _raise_errors(self): + if self.errors: + errors_len = len(self.errors) + plural = "s" if len(self.errors) > 1 else "" + errors = "; ".join(self.errors) + raise BadRequestException("{errors_len} validation error{plural} detected: {errors}".format( + errors_len=errors_len, plural=plural, errors=errors, + )) + + def _validate_description(self, value): + errors = [] + if len(value) > 511: + errors.append(self._format_error( + key="description", + 
value=value, + constraint="Member must have length less than or equal to 512", + )) + if not re.match(r"^[\sa-zA-Z0-9_.-]*$", value): + errors.append(self._format_error( + key="name", + value=value, + constraint=r"Member must satisfy regular expression pattern: [\sa-zA-Z0-9_\.-]*", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_name(self, value): + errors = [] + if len(value) > 128: + errors.append(self._format_error( + key="name", + value=value, + constraint="Member must have length less than or equal to 128", + )) + # Note \ is a character to match not an escape. + if not re.match(r"^[a-zA-Z0-9_\\.-]+$", value): + errors.append(self._format_error( + key="name", + value=value, + constraint=r"Member must satisfy regular expression pattern: [a-zA-Z0-9_\.-]+", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_resource_query(self, value): + errors = [] + if value["Type"] not in {"CLOUDFORMATION_STACK_1_0", "TAG_FILTERS_1_0"}: + errors.append(self._format_error( + key="resourceQuery.type", + value=value, + constraint="Member must satisfy enum value set: [CLOUDFORMATION_STACK_1_0, TAG_FILTERS_1_0]", + )) + if len(value["Query"]) > 2048: + errors.append(self._format_error( + key="resourceQuery.query", + value=value, + constraint="Member must have length less than or equal to 2048", + )) + if errors: + self.errors += errors + return False + return True + + def _validate_tags(self, value): + errors = [] + # AWS only outputs one error for all keys and one for all values. + error_keys = None + error_values = None + regex = re.compile(r"^([\\p{L}\\p{Z}\\p{N}_.:/=+\-@]*)$") + for tag_key, tag_value in value.items(): + # Validation for len(tag_key) >= 1 is done by botocore. + if len(tag_key) > 128 or re.match(regex, tag_key): + error_keys = self._format_error( + key="tags", + value=value, + constraint=( + "Map value must satisfy constraint: [" + "Member must have length less than or equal to 128, " + "Member must have length greater than or equal to 1, " + r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$" + "]" + ), + ) + # Validation for len(tag_value) >= 0 is nonsensical. 
+ if len(tag_value) > 256 or re.match(regex, tag_key): + error_values = self._format_error( + key="tags", + value=value, + constraint=( + "Map value must satisfy constraint: [" + "Member must have length less than or equal to 256, " + "Member must have length greater than or equal to 0, " + r"Member must satisfy regular expression pattern: ^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$" + "]" + ), + ) + if error_keys: + errors.append(error_keys) + if error_values: + errors.append(error_values) + if errors: + self.errors += errors + return False + return True + + @property + def description(self): + return self._description + + @description.setter + def description(self, value): + if not self._validate_description(value=value): + self._raise_errors() + self._description = value + + @property + def name(self): + return self._name + + @name.setter + def name(self, value): + if not self._validate_name(value=value): + self._raise_errors() + self._name = value + + @property + def resource_query(self): + return self._resource_query + + @resource_query.setter + def resource_query(self, value): + if not self._validate_resource_query(value=value): + self._raise_errors() + self._resource_query = value + + @property + def tags(self): + return self._tags + + @tags.setter + def tags(self, value): + if not self._validate_tags(value=value): + self._raise_errors() + self._tags = value + + +class ResourceGroups(): + def __init__(self): + self.by_name = {} + self.by_arn = {} + + def __contains__(self, item): + return item in self.by_name + + def append(self, resource_group): + self.by_name[resource_group.name] = resource_group + self.by_arn[resource_group.arn] = resource_group + + def delete(self, name): + group = self.by_name[name] + del self.by_name[name] + del self.by_arn[group.arn] + return group + + +class ResourceGroupsBackend(BaseBackend): + def __init__(self, region_name=None): + super(ResourceGroupsBackend, self).__init__() + self.region_name = region_name + self.groups = ResourceGroups() + + @staticmethod + def _validate_resource_query(resource_query): + type = resource_query["Type"] + query = json.loads(resource_query["Query"]) + query_keys = set(query.keys()) + invalid_json_exception = BadRequestException("Invalid query: Invalid query format: check JSON syntax") + if not isinstance(query["ResourceTypeFilters"], list): + raise invalid_json_exception + if type == "CLOUDFORMATION_STACK_1_0": + if query_keys != {"ResourceTypeFilters", "StackIdentifier"}: + raise invalid_json_exception + stack_identifier = query["StackIdentifier"] + if not isinstance(stack_identifier, str): + raise invalid_json_exception + if not re.match( + r"^arn:aws:cloudformation:[a-z]{2}-[a-z]+-[0-9]+:[0-9]+:stack/[-0-9A-z]+/[-0-9a-f]+$", + stack_identifier, + ): + raise BadRequestException( + "Invalid query: Verify that the specified ARN is formatted correctly." + ) + # Once checking other resources is implemented. 
+ # if stack_identifier not in self.cloudformation_backend.stacks: + # raise BadRequestException("Invalid query: The specified CloudFormation stack doesn't exist.") + if type == "TAG_FILTERS_1_0": + if query_keys != {"ResourceTypeFilters", "TagFilters"}: + raise invalid_json_exception + tag_filters = query["TagFilters"] + if not isinstance(tag_filters, list): + raise invalid_json_exception + if not tag_filters or len(tag_filters) > 50: + raise BadRequestException( + "Invalid query: The TagFilters list must contain between 1 and 50 elements" + ) + for tag_filter in tag_filters: + if not isinstance(tag_filter, dict): + raise invalid_json_exception + if set(tag_filter.keys()) != {"Key", "Values"}: + raise invalid_json_exception + key = tag_filter["Key"] + if not isinstance(key, str): + raise invalid_json_exception + if not key: + raise BadRequestException( + "Invalid query: The TagFilter element cannot have empty or null Key field" + ) + if len(key) > 128: + raise BadRequestException("Invalid query: The maximum length for a tag Key is 128") + values = tag_filter["Values"] + if not isinstance(values, list): + raise invalid_json_exception + if len(values) > 20: + raise BadRequestException( + "Invalid query: The TagFilter Values list must contain between 0 and 20 elements" + ) + for value in values: + if not isinstance(value, str): + raise invalid_json_exception + if len(value) > 256: + raise BadRequestException( + "Invalid query: The maximum length for a tag Value is 256" + ) + + @staticmethod + def _validate_tags(tags): + for tag in tags: + if tag.lower().startswith('aws:'): + raise BadRequestException("Tag keys must not start with 'aws:'") + + def create_group(self, name, resource_query, description=None, tags=None): + tags = tags or {} + group = FakeResourceGroup( + name=name, + resource_query=resource_query, + description=description, + tags=tags, + ) + if name in self.groups: + raise BadRequestException("Cannot create group: group already exists") + if name.upper().startswith("AWS"): + raise BadRequestException("Group name must not start with 'AWS'") + self._validate_tags(tags) + self._validate_resource_query(resource_query) + self.groups.append(group) + return group + + def delete_group(self, group_name): + return self.groups.delete(name=group_name) + + def get_group(self, group_name): + return self.groups.by_name[group_name] + + def get_tags(self, arn): + return self.groups.by_arn[arn].tags + + # def list_group_resources(self): + # ... + + def list_groups(self, filters=None, max_results=None, next_token=None): + return self.groups.by_name + + # def search_resources(self): + # ... 
+ + def tag(self, arn, tags): + all_tags = self.groups.by_arn[arn].tags + all_tags.update(tags) + self._validate_tags(all_tags) + self.groups.by_arn[arn].tags = all_tags + + def untag(self, arn, keys): + group = self.groups.by_arn[arn] + for key in keys: + del group.tags[key] + + def update_group(self, group_name, description=None): + if description: + self.groups.by_name[group_name].description = description + return self.groups.by_name[group_name] + + def update_group_query(self, group_name, resource_query): + self._validate_resource_query(resource_query) + self.groups.by_name[group_name].resource_query = resource_query + return self.groups.by_name[group_name] + + +available_regions = boto3.session.Session().get_available_regions("resource-groups") +resourcegroups_backends = {region: ResourceGroupsBackend(region_name=region) for region in available_regions} diff --git a/moto/resourcegroups/responses.py b/moto/resourcegroups/responses.py new file mode 100644 index 000000000..02ea14c1a --- /dev/null +++ b/moto/resourcegroups/responses.py @@ -0,0 +1,162 @@ +from __future__ import unicode_literals +import json + +try: + from urllib import unquote +except ImportError: + from urllib.parse import unquote + +from moto.core.responses import BaseResponse +from .models import resourcegroups_backends + + +class ResourceGroupsResponse(BaseResponse): + SERVICE_NAME = 'resource-groups' + + @property + def resourcegroups_backend(self): + return resourcegroups_backends[self.region] + + def create_group(self): + name = self._get_param("Name") + description = self._get_param("Description") + resource_query = self._get_param("ResourceQuery") + tags = self._get_param("Tags") + group = self.resourcegroups_backend.create_group( + name=name, + description=description, + resource_query=resource_query, + tags=tags, + ) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + "ResourceQuery": group.resource_query, + "Tags": group.tags + }) + + def delete_group(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.delete_group(group_name=group_name) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + }) + + def get_group(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.get_group(group_name=group_name) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description, + } + }) + + def get_group_query(self): + group_name = self._get_param("GroupName") + group = self.resourcegroups_backend.get_group(group_name=group_name) + return json.dumps({ + "GroupQuery": { + "GroupName": group.name, + "ResourceQuery": group.resource_query, + } + }) + + def get_tags(self): + arn = unquote(self._get_param("Arn")) + return json.dumps({ + "Arn": arn, + "Tags": self.resourcegroups_backend.get_tags(arn=arn) + }) + + def list_group_resources(self): + raise NotImplementedError('ResourceGroups.list_group_resources is not yet implemented') + + def list_groups(self): + filters = self._get_param("Filters") + if filters: + raise NotImplementedError( + 'ResourceGroups.list_groups with filter parameter is not yet implemented' + ) + max_results = self._get_int_param("MaxResults", 50) + next_token = self._get_param("NextToken") + groups = self.resourcegroups_backend.list_groups( + filters=filters, + max_results=max_results, + next_token=next_token + ) + return json.dumps({ + 
"GroupIdentifiers": [{ + "GroupName": group.name, + "GroupArn": group.arn, + } for group in groups.values()], + "Groups": [{ + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description, + } for group in groups.values()], + "NextToken": next_token, + }) + + def search_resources(self): + raise NotImplementedError('ResourceGroups.search_resources is not yet implemented') + + def tag(self): + arn = unquote(self._get_param("Arn")) + tags = self._get_param("Tags") + if arn not in self.resourcegroups_backend.groups.by_arn: + raise NotImplementedError( + 'ResourceGroups.tag with non-resource-group Arn parameter is not yet implemented' + ) + self.resourcegroups_backend.tag(arn=arn, tags=tags) + return json.dumps({ + "Arn": arn, + "Tags": tags + }) + + def untag(self): + arn = unquote(self._get_param("Arn")) + keys = self._get_param("Keys") + if arn not in self.resourcegroups_backend.groups.by_arn: + raise NotImplementedError( + 'ResourceGroups.untag with non-resource-group Arn parameter is not yet implemented' + ) + self.resourcegroups_backend.untag(arn=arn, keys=keys) + return json.dumps({ + "Arn": arn, + "Keys": keys + }) + + def update_group(self): + group_name = self._get_param("GroupName") + description = self._get_param("Description", "") + group = self.resourcegroups_backend.update_group(group_name=group_name, description=description) + return json.dumps({ + "Group": { + "GroupArn": group.arn, + "Name": group.name, + "Description": group.description + }, + }) + + def update_group_query(self): + group_name = self._get_param("GroupName") + resource_query = self._get_param("ResourceQuery") + group = self.resourcegroups_backend.update_group_query( + group_name=group_name, + resource_query=resource_query + ) + return json.dumps({ + "GroupQuery": { + "GroupName": group.name, + "ResourceQuery": resource_query + } + }) diff --git a/moto/resourcegroups/urls.py b/moto/resourcegroups/urls.py new file mode 100644 index 000000000..518dde766 --- /dev/null +++ b/moto/resourcegroups/urls.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from .responses import ResourceGroupsResponse + +url_bases = [ + "https?://resource-groups(-fips)?.(.+).amazonaws.com", +] + +url_paths = { + '{0}/groups$': ResourceGroupsResponse.dispatch, + '{0}/groups/(?P[^/]+)$': ResourceGroupsResponse.dispatch, + '{0}/groups/(?P[^/]+)/query$': ResourceGroupsResponse.dispatch, + '{0}/groups-list$': ResourceGroupsResponse.dispatch, + '{0}/resources/(?P[^/]+)/tags$': ResourceGroupsResponse.dispatch, +} diff --git a/moto/resourcegroupstaggingapi/__init__.py b/moto/resourcegroupstaggingapi/__init__.py new file mode 100644 index 000000000..bd0c4a7df --- /dev/null +++ b/moto/resourcegroupstaggingapi/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import resourcegroupstaggingapi_backends +from ..core.models import base_decorator + +resourcegroupstaggingapi_backend = resourcegroupstaggingapi_backends['us-east-1'] +mock_resourcegroupstaggingapi = base_decorator(resourcegroupstaggingapi_backends) diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py new file mode 100644 index 000000000..4aec63aa6 --- /dev/null +++ b/moto/resourcegroupstaggingapi/models.py @@ -0,0 +1,530 @@ +from __future__ import unicode_literals +import uuid +import boto3 +import six +from moto.core import BaseBackend +from moto.core.exceptions import RESTError + +from moto.s3 import s3_backends +from moto.ec2 import ec2_backends +from moto.elb import elb_backends +from 
moto.elbv2 import elbv2_backends +from moto.kinesis import kinesis_backends +from moto.rds2 import rds2_backends +from moto.glacier import glacier_backends +from moto.redshift import redshift_backends +from moto.emr import emr_backends + +# Left: EC2 ElastiCache RDS ELB CloudFront WorkSpaces Lambda EMR Glacier Kinesis Redshift Route53 +# StorageGateway DynamoDB MachineLearning ACM DirectConnect DirectoryService CloudHSM +# Inspector Elasticsearch + + +class ResourceGroupsTaggingAPIBackend(BaseBackend): + def __init__(self, region_name=None): + super(ResourceGroupsTaggingAPIBackend, self).__init__() + self.region_name = region_name + + self._pages = {} + # Like 'someuuid': {'gen': , 'misc': None} + # Misc is there for peeking from a generator and it cant + # fit in the current request. As we only store generators + # theres not really any point to clean up + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + @property + def s3_backend(self): + """ + :rtype: moto.s3.models.S3Backend + """ + return s3_backends['global'] + + @property + def ec2_backend(self): + """ + :rtype: moto.ec2.models.EC2Backend + """ + return ec2_backends[self.region_name] + + @property + def elb_backend(self): + """ + :rtype: moto.elb.models.ELBBackend + """ + return elb_backends[self.region_name] + + @property + def elbv2_backend(self): + """ + :rtype: moto.elbv2.models.ELBv2Backend + """ + return elbv2_backends[self.region_name] + + @property + def kinesis_backend(self): + """ + :rtype: moto.kinesis.models.KinesisBackend + """ + return kinesis_backends[self.region_name] + + @property + def rds_backend(self): + """ + :rtype: moto.rds2.models.RDS2Backend + """ + return rds2_backends[self.region_name] + + @property + def glacier_backend(self): + """ + :rtype: moto.glacier.models.GlacierBackend + """ + return glacier_backends[self.region_name] + + @property + def emr_backend(self): + """ + :rtype: moto.emr.models.ElasticMapReduceBackend + """ + return emr_backends[self.region_name] + + @property + def redshift_backend(self): + """ + :rtype: moto.redshift.models.RedshiftBackend + """ + return redshift_backends[self.region_name] + + def _get_resources_generator(self, tag_filters=None, resource_type_filters=None): + # Look at + # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + + # TODO move these to their respective backends + filters = [lambda t, v: True] + for tag_filter_dict in tag_filters: + values = tag_filter_dict.get('Values', []) + if len(values) == 0: + # Check key matches + filters.append(lambda t, v: t == tag_filter_dict['Key']) + elif len(values) == 1: + # Check its exactly the same as key, value + filters.append(lambda t, v: t == tag_filter_dict['Key'] and v == values[0]) + else: + # Check key matches and value is one of the provided values + filters.append(lambda t, v: t == tag_filter_dict['Key'] and v in values) + + def tag_filter(tag_list): + result = [] + if tag_filters: + for tag in tag_list: + temp_result = [] + for f in filters: + f_result = f(tag['Key'], tag['Value']) + temp_result.append(f_result) + result.append(all(temp_result)) + + return any(result) + else: + return True + + # Do S3, resource type s3 + if not resource_type_filters or 's3' in resource_type_filters: + for bucket in self.s3_backend.buckets.values(): + tags = [] + for tag in bucket.tags.tag_set.tags: + tags.append({'Key': tag.key, 'Value': tag.value}) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield 
{'ResourceARN': 'arn:aws:s3:::' + bucket.name, 'Tags': tags} + + # EC2 tags + def get_ec2_tags(res_id): + result = [] + for key, value in self.ec2_backend.tags.get(res_id, {}).items(): + result.append({'Key': key, 'Value': value}) + return result + + # EC2 AMI, resource type ec2:image + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:image' in resource_type_filters: + for ami in self.ec2_backend.amis.values(): + tags = get_ec2_tags(ami.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::image/{1}'.format(self.region_name, ami.id), 'Tags': tags} + + # EC2 Instance, resource type ec2:instance + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:instance' in resource_type_filters: + for reservation in self.ec2_backend.reservations.values(): + for instance in reservation.instances: + tags = get_ec2_tags(instance.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::instance/{1}'.format(self.region_name, instance.id), 'Tags': tags} + + # EC2 NetworkInterface, resource type ec2:network-interface + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:network-interface' in resource_type_filters: + for eni in self.ec2_backend.enis.values(): + tags = get_ec2_tags(eni.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::network-interface/{1}'.format(self.region_name, eni.id), 'Tags': tags} + + # TODO EC2 ReservedInstance + + # EC2 SecurityGroup, resource type ec2:security-group + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:security-group' in resource_type_filters: + for vpc in self.ec2_backend.groups.values(): + for sg in vpc.values(): + tags = get_ec2_tags(sg.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::security-group/{1}'.format(self.region_name, sg.id), 'Tags': tags} + + # EC2 Snapshot, resource type ec2:snapshot + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:snapshot' in resource_type_filters: + for snapshot in self.ec2_backend.snapshots.values(): + tags = get_ec2_tags(snapshot.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::snapshot/{1}'.format(self.region_name, snapshot.id), 'Tags': tags} + + # TODO EC2 SpotInstanceRequest + + # EC2 Volume, resource type ec2:volume + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:volume' in resource_type_filters: + for volume in self.ec2_backend.volumes.values(): + tags = get_ec2_tags(volume.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::volume/{1}'.format(self.region_name, volume.id), 'Tags': tags} + + # TODO add these to the keys and values functions / combine functions + # ELB + + def get_elbv2_tags(arn): + result = [] + for key, value in self.elbv2_backend.load_balancers[elb.arn].tags.items(): + result.append({'Key': key, 'Value': value}) + return result + + if not resource_type_filters or 'elasticloadbalancer' in resource_type_filters or 'elasticloadbalancer:loadbalancer' in resource_type_filters: + for elb in self.elbv2_backend.load_balancers.values(): + tags = get_elbv2_tags(elb.arn) + # if 
'elasticloadbalancer:loadbalancer' in resource_type_filters: + # from IPython import embed + # embed() + if not tag_filter(tags): # Skip if no tags, or invalid filter + continue + + yield {'ResourceARN': '{0}'.format(elb.arn), 'Tags': tags} + + # EMR Cluster + + # Glacier Vault + + # Kinesis + + # RDS Instance + # RDS Reserved Database Instance + # RDS Option Group + # RDS Parameter Group + # RDS Security Group + # RDS Snapshot + # RDS Subnet Group + # RDS Event Subscription + + # RedShift Cluster + # RedShift Hardware security module (HSM) client certificate + # RedShift HSM connection + # RedShift Parameter group + # RedShift Snapshot + # RedShift Subnet group + + # VPC + # VPC Customer Gateway + # VPC DHCP Option Set + # VPC Internet Gateway + # VPC Network ACL + # VPC Route Table + # VPC Subnet + # VPC Virtual Private Gateway + # VPC VPN Connection + + def _get_tag_keys_generator(self): + # Look at + # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + + # Do S3, resource type s3 + for bucket in self.s3_backend.buckets.values(): + for tag in bucket.tags.tag_set.tags: + yield tag.key + + # EC2 tags + def get_ec2_keys(res_id): + result = [] + for key in self.ec2_backend.tags.get(res_id, {}): + result.append(key) + return result + + # EC2 AMI, resource type ec2:image + for ami in self.ec2_backend.amis.values(): + for key in get_ec2_keys(ami.id): + yield key + + # EC2 Instance, resource type ec2:instance + for reservation in self.ec2_backend.reservations.values(): + for instance in reservation.instances: + for key in get_ec2_keys(instance.id): + yield key + + # EC2 NetworkInterface, resource type ec2:network-interface + for eni in self.ec2_backend.enis.values(): + for key in get_ec2_keys(eni.id): + yield key + + # TODO EC2 ReservedInstance + + # EC2 SecurityGroup, resource type ec2:security-group + for vpc in self.ec2_backend.groups.values(): + for sg in vpc.values(): + for key in get_ec2_keys(sg.id): + yield key + + # EC2 Snapshot, resource type ec2:snapshot + for snapshot in self.ec2_backend.snapshots.values(): + for key in get_ec2_keys(snapshot.id): + yield key + + # TODO EC2 SpotInstanceRequest + + # EC2 Volume, resource type ec2:volume + for volume in self.ec2_backend.volumes.values(): + for key in get_ec2_keys(volume.id): + yield key + + def _get_tag_values_generator(self, tag_key): + # Look at + # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + + # Do S3, resource type s3 + for bucket in self.s3_backend.buckets.values(): + for tag in bucket.tags.tag_set.tags: + if tag.key == tag_key: + yield tag.value + + # EC2 tags + def get_ec2_values(res_id): + result = [] + for key, value in self.ec2_backend.tags.get(res_id, {}).items(): + if key == tag_key: + result.append(value) + return result + + # EC2 AMI, resource type ec2:image + for ami in self.ec2_backend.amis.values(): + for value in get_ec2_values(ami.id): + yield value + + # EC2 Instance, resource type ec2:instance + for reservation in self.ec2_backend.reservations.values(): + for instance in reservation.instances: + for value in get_ec2_values(instance.id): + yield value + + # EC2 NetworkInterface, resource type ec2:network-interface + for eni in self.ec2_backend.enis.values(): + for value in get_ec2_values(eni.id): + yield value + + # TODO EC2 ReservedInstance + + # EC2 SecurityGroup, resource type ec2:security-group + for vpc in self.ec2_backend.groups.values(): + for sg in vpc.values(): + for value in get_ec2_values(sg.id): + yield value + + # EC2 Snapshot, resource type 
ec2:snapshot + for snapshot in self.ec2_backend.snapshots.values(): + for value in get_ec2_values(snapshot.id): + yield value + + # TODO EC2 SpotInstanceRequest + + # EC2 Volume, resource type ec2:volume + for volume in self.ec2_backend.volumes.values(): + for value in get_ec2_values(volume.id): + yield value + + def get_resources(self, pagination_token=None, + resources_per_page=50, tags_per_page=100, + tag_filters=None, resource_type_filters=None): + # Simple range checning + if 100 >= tags_per_page >= 500: + raise RESTError('InvalidParameterException', 'TagsPerPage must be between 100 and 500') + if 1 >= resources_per_page >= 50: + raise RESTError('InvalidParameterException', 'ResourcesPerPage must be between 1 and 50') + + # If we have a token, go and find the respective generator, or error + if pagination_token: + if pagination_token not in self._pages: + raise RESTError('PaginationTokenExpiredException', 'Token does not exist') + + generator = self._pages[pagination_token]['gen'] + left_over = self._pages[pagination_token]['misc'] + else: + generator = self._get_resources_generator(tag_filters=tag_filters, + resource_type_filters=resource_type_filters) + left_over = None + + result = [] + current_tags = 0 + current_resources = 0 + if left_over: + result.append(left_over) + current_resources += 1 + current_tags += len(left_over['Tags']) + + try: + while True: + # Generator format: [{'ResourceARN': str, 'Tags': [{'Key': str, 'Value': str]}, ...] + next_item = six.next(generator) + resource_tags = len(next_item['Tags']) + + if current_resources >= resources_per_page: + break + if current_tags + resource_tags >= tags_per_page: + break + + current_resources += 1 + current_tags += resource_tags + + result.append(next_item) + + except StopIteration: + # Finished generator before invalidating page limiting constraints + return None, result + + # Didn't hit StopIteration so there's stuff left in generator + new_token = str(uuid.uuid4()) + self._pages[new_token] = {'gen': generator, 'misc': next_item} + + # Token used up, might as well bin now, if you call it again your an idiot + if pagination_token: + del self._pages[pagination_token] + + return new_token, result + + def get_tag_keys(self, pagination_token=None): + + if pagination_token: + if pagination_token not in self._pages: + raise RESTError('PaginationTokenExpiredException', 'Token does not exist') + + generator = self._pages[pagination_token]['gen'] + left_over = self._pages[pagination_token]['misc'] + else: + generator = self._get_tag_keys_generator() + left_over = None + + result = [] + current_tags = 0 + if left_over: + result.append(left_over) + current_tags += 1 + + try: + while True: + # Generator format: ['tag', 'tag', 'tag', ...] 
+ next_item = six.next(generator) + + if current_tags + 1 >= 128: + break + + current_tags += 1 + + result.append(next_item) + + except StopIteration: + # Finished generator before invalidating page limiting constraints + return None, result + + # Didn't hit StopIteration so there's stuff left in generator + new_token = str(uuid.uuid4()) + self._pages[new_token] = {'gen': generator, 'misc': next_item} + + # Token used up, might as well bin now, if you call it again your an idiot + if pagination_token: + del self._pages[pagination_token] + + return new_token, result + + def get_tag_values(self, pagination_token, key): + + if pagination_token: + if pagination_token not in self._pages: + raise RESTError('PaginationTokenExpiredException', 'Token does not exist') + + generator = self._pages[pagination_token]['gen'] + left_over = self._pages[pagination_token]['misc'] + else: + generator = self._get_tag_values_generator(key) + left_over = None + + result = [] + current_tags = 0 + if left_over: + result.append(left_over) + current_tags += 1 + + try: + while True: + # Generator format: ['value', 'value', 'value', ...] + next_item = six.next(generator) + + if current_tags + 1 >= 128: + break + + current_tags += 1 + + result.append(next_item) + + except StopIteration: + # Finished generator before invalidating page limiting constraints + return None, result + + # Didn't hit StopIteration so there's stuff left in generator + new_token = str(uuid.uuid4()) + self._pages[new_token] = {'gen': generator, 'misc': next_item} + + # Token used up, might as well bin now, if you call it again your an idiot + if pagination_token: + del self._pages[pagination_token] + + return new_token, result + + # These methods will be called from responses.py. + # They should call a tag function inside of the moto module + # that governs the resource, that way if the target module + # changes how tags are delt with theres less to change + + # def tag_resources(self, resource_arn_list, tags): + # return failed_resources_map + # + # def untag_resources(self, resource_arn_list, tag_keys): + # return failed_resources_map + + +available_regions = boto3.session.Session().get_available_regions("resourcegroupstaggingapi") +resourcegroupstaggingapi_backends = {region: ResourceGroupsTaggingAPIBackend(region) for region in available_regions} diff --git a/moto/resourcegroupstaggingapi/responses.py b/moto/resourcegroupstaggingapi/responses.py new file mode 100644 index 000000000..966778f29 --- /dev/null +++ b/moto/resourcegroupstaggingapi/responses.py @@ -0,0 +1,97 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import resourcegroupstaggingapi_backends +import json + + +class ResourceGroupsTaggingAPIResponse(BaseResponse): + SERVICE_NAME = 'resourcegroupstaggingapi' + + @property + def backend(self): + """ + Backend + :returns: Resource tagging api backend + :rtype: moto.resourcegroupstaggingapi.models.ResourceGroupsTaggingAPIBackend + """ + return resourcegroupstaggingapi_backends[self.region] + + def get_resources(self): + pagination_token = self._get_param("PaginationToken") + tag_filters = self._get_param("TagFilters", []) + resources_per_page = self._get_int_param("ResourcesPerPage", 50) + tags_per_page = self._get_int_param("TagsPerPage", 100) + resource_type_filters = self._get_param("ResourceTypeFilters", []) + + pagination_token, resource_tag_mapping_list = self.backend.get_resources( + pagination_token=pagination_token, + tag_filters=tag_filters, + 
resources_per_page=resources_per_page, + tags_per_page=tags_per_page, + resource_type_filters=resource_type_filters, + ) + + # Format tag response + response = { + 'ResourceTagMappingList': resource_tag_mapping_list + } + if pagination_token: + response['PaginationToken'] = pagination_token + + return json.dumps(response) + + def get_tag_keys(self): + pagination_token = self._get_param("PaginationToken") + pagination_token, tag_keys = self.backend.get_tag_keys( + pagination_token=pagination_token, + ) + + response = { + 'TagKeys': tag_keys + } + if pagination_token: + response['PaginationToken'] = pagination_token + + return json.dumps(response) + + def get_tag_values(self): + pagination_token = self._get_param("PaginationToken") + key = self._get_param("Key") + pagination_token, tag_values = self.backend.get_tag_values( + pagination_token=pagination_token, + key=key, + ) + + response = { + 'TagValues': tag_values + } + if pagination_token: + response['PaginationToken'] = pagination_token + + return json.dumps(response) + + # These methods are all thats left to be implemented + # the response is already set up, all thats needed is + # the respective model function to be implemented. + # + # def tag_resources(self): + # resource_arn_list = self._get_list_prefix("ResourceARNList.member") + # tags = self._get_param("Tags") + # failed_resources_map = self.backend.tag_resources( + # resource_arn_list=resource_arn_list, + # tags=tags, + # ) + # + # # failed_resources_map should be {'resource': {'ErrorCode': str, 'ErrorMessage': str, 'StatusCode': int}} + # return json.dumps({'FailedResourcesMap': failed_resources_map}) + # + # def untag_resources(self): + # resource_arn_list = self._get_list_prefix("ResourceARNList.member") + # tag_keys = self._get_list_prefix("TagKeys.member") + # failed_resources_map = self.backend.untag_resources( + # resource_arn_list=resource_arn_list, + # tag_keys=tag_keys, + # ) + # + # # failed_resources_map should be {'resource': {'ErrorCode': str, 'ErrorMessage': str, 'StatusCode': int}} + # return json.dumps({'FailedResourcesMap': failed_resources_map}) diff --git a/moto/resourcegroupstaggingapi/urls.py b/moto/resourcegroupstaggingapi/urls.py new file mode 100644 index 000000000..a972df276 --- /dev/null +++ b/moto/resourcegroupstaggingapi/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import ResourceGroupsTaggingAPIResponse + +url_bases = [ + "https?://tagging.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': ResourceGroupsTaggingAPIResponse.dispatch, +} diff --git a/moto/route53/models.py b/moto/route53/models.py index af8bb690a..61a6609aa 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -24,7 +24,7 @@ class HealthCheck(BaseModel): self.id = health_check_id self.ip_address = health_check_args.get("ip_address") self.port = health_check_args.get("port", 80) - self._type = health_check_args.get("type") + self.type_ = health_check_args.get("type") self.resource_path = health_check_args.get("resource_path") self.fqdn = health_check_args.get("fqdn") self.search_string = health_check_args.get("search_string") @@ -58,7 +58,7 @@ class HealthCheck(BaseModel): {{ health_check.ip_address }} {{ health_check.port }} - {{ health_check._type }} + {{ health_check.type_ }} {{ health_check.resource_path }} {{ health_check.fqdn }} {{ health_check.request_interval }} @@ -76,7 +76,7 @@ class RecordSet(BaseModel): def __init__(self, kwargs): self.name = kwargs.get('Name') - self._type = kwargs.get('Type') + self.type_ = 
kwargs.get('Type') self.ttl = kwargs.get('TTL') self.records = kwargs.get('ResourceRecords', []) self.set_identifier = kwargs.get('SetIdentifier') @@ -85,6 +85,7 @@ class RecordSet(BaseModel): self.health_check = kwargs.get('HealthCheckId') self.hosted_zone_name = kwargs.get('HostedZoneName') self.hosted_zone_id = kwargs.get('HostedZoneId') + self.alias_target = kwargs.get('AliasTarget') @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): @@ -119,7 +120,7 @@ class RecordSet(BaseModel): properties["HostedZoneId"]) try: - hosted_zone.delete_rrset_by_name(resource_name) + hosted_zone.delete_rrset({'Name': resource_name}) except KeyError: pass @@ -130,7 +131,7 @@ class RecordSet(BaseModel): def to_xml(self): template = Template(""" {{ record_set.name }} - {{ record_set._type }} + {{ record_set.type_ }} {% if record_set.set_identifier %} {{ record_set.set_identifier }} {% endif %} @@ -140,7 +141,16 @@ class RecordSet(BaseModel): {% if record_set.region %} {{ record_set.region }} {% endif %} - {{ record_set.ttl }} + {% if record_set.ttl %} + {{ record_set.ttl }} + {% endif %} + {% if record_set.alias_target %} + + {{ record_set.alias_target['HostedZoneId'] }} + {{ record_set.alias_target['DNSName'] }} + {{ record_set.alias_target['EvaluateTargetHealth'] }} + + {% else %} {% for record in record_set.records %} @@ -148,6 +158,7 @@ class RecordSet(BaseModel): {% endfor %} + {% endif %} {% if record_set.health_check %} {{ record_set.health_check }} {% endif %} @@ -160,7 +171,13 @@ class RecordSet(BaseModel): self.hosted_zone_name) if not hosted_zone: hosted_zone = route53_backend.get_hosted_zone(self.hosted_zone_id) - hosted_zone.delete_rrset_by_name(self.name) + hosted_zone.delete_rrset({'Name': self.name, 'Type': self.type_}) + + +def reverse_domain_name(domain_name): + if domain_name.endswith('.'): # normalize without trailing dot + domain_name = domain_name[:-1] + return '.'.join(reversed(domain_name.split('.'))) class FakeZone(BaseModel): @@ -181,16 +198,20 @@ class FakeZone(BaseModel): def upsert_rrset(self, record_set): new_rrset = RecordSet(record_set) for i, rrset in enumerate(self.rrsets): - if rrset.name == new_rrset.name: + if rrset.name == new_rrset.name and rrset.type_ == new_rrset.type_ and rrset.set_identifier == new_rrset.set_identifier: self.rrsets[i] = new_rrset break else: self.rrsets.append(new_rrset) return new_rrset - def delete_rrset_by_name(self, name): + def delete_rrset(self, rrset): self.rrsets = [ - record_set for record_set in self.rrsets if record_set.name != name] + record_set + for record_set in self.rrsets + if record_set.name != rrset['Name'] or + (rrset.get('Type') is not None and record_set.type_ != rrset['Type']) + ] def delete_rrset_by_id(self, set_identifier): self.rrsets = [ @@ -198,12 +219,15 @@ class FakeZone(BaseModel): def get_record_sets(self, start_type, start_name): record_sets = list(self.rrsets) # Copy the list - if start_type: - record_sets = [ - record_set for record_set in record_sets if record_set._type >= start_type] if start_name: record_sets = [ - record_set for record_set in record_sets if record_set.name >= start_name] + record_set + for record_set in record_sets + if reverse_domain_name(record_set.name) >= reverse_domain_name(start_name) + ] + if start_type: + record_sets = [ + record_set for record_set in record_sets if record_set.type_ >= start_type] return record_sets diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 00e5c60a5..f933c575a 100644 --- 
a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -123,6 +123,9 @@ class Route53(BaseResponse): """ % (record_set['Name'], the_zone.name) return 400, headers, error_msg + if not record_set['Name'].endswith('.'): + record_set['Name'] += '.' + if action in ('CREATE', 'UPSERT'): if 'ResourceRecords' in record_set: resource_records = list( @@ -131,10 +134,7 @@ class Route53(BaseResponse): # Depending on how many records there are, this may # or may not be a list resource_records = [resource_records] - record_values = [x['Value'] for x in resource_records] - elif 'AliasTarget' in record_set: - record_values = [record_set['AliasTarget']['DNSName']] - record_set['ResourceRecords'] = record_values + record_set['ResourceRecords'] = [x['Value'] for x in resource_records] if action == 'CREATE': the_zone.add_rrset(record_set) else: @@ -144,13 +144,13 @@ class Route53(BaseResponse): the_zone.delete_rrset_by_id( record_set["SetIdentifier"]) else: - the_zone.delete_rrset_by_name(record_set["Name"]) + the_zone.delete_rrset(record_set) return 200, headers, CHANGE_RRSET_RESPONSE elif method == "GET": querystring = parse_qs(parsed_url.query) - template = Template(LIST_RRSET_REPONSE) + template = Template(LIST_RRSET_RESPONSE) start_type = querystring.get("type", [None])[0] start_name = querystring.get("name", [None])[0] record_sets = the_zone.get_record_sets(start_type, start_name) @@ -182,9 +182,9 @@ class Route53(BaseResponse): elif method == "DELETE": health_check_id = parsed_url.path.split("/")[-1] route53_backend.delete_health_check(health_check_id) - return 200, headers, DELETE_HEALTH_CHECK_REPONSE + return 200, headers, DELETE_HEALTH_CHECK_RESPONSE elif method == "GET": - template = Template(LIST_HEALTH_CHECKS_REPONSE) + template = Template(LIST_HEALTH_CHECKS_RESPONSE) health_checks = route53_backend.get_health_checks() return 200, headers, template.render(health_checks=health_checks) @@ -248,7 +248,7 @@ CHANGE_TAGS_FOR_RESOURCE_RESPONSE = """ +LIST_RRSET_RESPONSE = """ {% for record_set in record_sets %} {{ record_set.to_xml() }} @@ -350,7 +350,7 @@ CREATE_HEALTH_CHECK_RESPONSE = """ {{ health_check.to_xml() }} """ -LIST_HEALTH_CHECKS_REPONSE = """ +LIST_HEALTH_CHECKS_RESPONSE = """ {% for health_check in health_checks %} @@ -361,6 +361,6 @@ LIST_HEALTH_CHECKS_REPONSE = """ {{ health_checks|length }} """ -DELETE_HEALTH_CHECK_REPONSE = """ +DELETE_HEALTH_CHECK_RESPONSE = """ """ diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 24704e7ef..27c842111 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -111,3 +111,91 @@ class MalformedXML(S3ClientError): "MalformedXML", "The XML you provided was not well-formed or did not validate against our published schema", *args, **kwargs) + + +class MalformedACLError(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(MalformedACLError, self).__init__( + "MalformedACLError", + "The XML you provided was not well-formed or did not validate against our published schema", + *args, **kwargs) + + +class InvalidTargetBucketForLogging(S3ClientError): + code = 400 + + def __init__(self, msg): + super(InvalidTargetBucketForLogging, self).__init__("InvalidTargetBucketForLogging", msg) + + +class CrossLocationLoggingProhibitted(S3ClientError): + code = 403 + + def __init__(self): + super(CrossLocationLoggingProhibitted, self).__init__( + "CrossLocationLoggingProhibitted", + "Cross S3 location logging not allowed." 
+ ) + + +class InvalidNotificationARN(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidNotificationARN, self).__init__( + "InvalidArgument", + "The ARN is not well formed", + *args, **kwargs) + + +class InvalidNotificationDestination(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidNotificationDestination, self).__init__( + "InvalidArgument", + "The notification destination service region is not valid for the bucket location constraint", + *args, **kwargs) + + +class InvalidNotificationEvent(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidNotificationEvent, self).__init__( + "InvalidArgument", + "The event is not supported for notifications", + *args, **kwargs) + + +class InvalidStorageClass(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidStorageClass, self).__init__( + "InvalidStorageClass", + "The storage class you specified is not valid", + *args, **kwargs) + + +class InvalidBucketName(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidBucketName, self).__init__( + "InvalidBucketName", + "The specified bucket is not valid.", + *args, **kwargs + ) + + +class DuplicateTagKeys(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(DuplicateTagKeys, self).__init__( + "InvalidTag", + "Cannot provide multiple Tags with the same key", + *args, **kwargs) diff --git a/moto/s3/models.py b/moto/s3/models.py index 91d3c1e2d..2a628d681 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -6,23 +6,44 @@ import hashlib import copy import itertools import codecs +import random +import string +import tempfile +import sys +import uuid + import six from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime -from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey +from .exceptions import ( + BucketAlreadyExists, MissingBucket, InvalidBucketName, InvalidPart, InvalidRequest, + EntityTooSmall, MissingKey, InvalidNotificationDestination, MalformedXML, InvalidStorageClass, + InvalidTargetBucketForLogging, DuplicateTagKeys, CrossLocationLoggingProhibitted +) from .utils import clean_key_name, _VersionedKeyStore +MAX_BUCKET_NAME_LENGTH = 63 +MIN_BUCKET_NAME_LENGTH = 3 UPLOAD_ID_BYTES = 43 UPLOAD_PART_MIN_SIZE = 5242880 +STORAGE_CLASS = ["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA", "ONEZONE_IA"] +DEFAULT_KEY_BUFFER_SIZE = 16 * 1024 * 1024 +DEFAULT_TEXT_ENCODING = sys.getdefaultencoding() class FakeDeleteMarker(BaseModel): def __init__(self, key): self.key = key - self._version_id = key.version_id + 1 + self.name = key.name + self.last_modified = datetime.datetime.utcnow() + self._version_id = str(uuid.uuid4()) + + @property + def last_modified_ISO8601(self): + return iso_8601_datetime_with_milliseconds(self.last_modified) @property def version_id(self): @@ -31,9 +52,18 @@ class FakeDeleteMarker(BaseModel): class FakeKey(BaseModel): - def __init__(self, name, value, storage="STANDARD", etag=None, is_versioned=False, version_id=0): + def __init__( + self, + name, + value, + storage="STANDARD", + etag=None, + is_versioned=False, + version_id=0, + max_buffer_size=DEFAULT_KEY_BUFFER_SIZE, + multipart=None + ): self.name = name - self.value = value self.last_modified = datetime.datetime.utcnow() self.acl = get_canned_acl('private') self.website_redirect_location = None @@ 
-44,15 +74,39 @@ class FakeKey(BaseModel): self._version_id = version_id self._is_versioned = is_versioned self._tagging = FakeTagging() + self.multipart = multipart + + self._value_buffer = tempfile.SpooledTemporaryFile(max_size=max_buffer_size) + self._max_buffer_size = max_buffer_size + self.value = value @property def version_id(self): return self._version_id - def copy(self, new_name=None): + @property + def value(self): + self._value_buffer.seek(0) + return self._value_buffer.read() + + @value.setter + def value(self, new_value): + self._value_buffer.seek(0) + self._value_buffer.truncate() + + # Hack for working around moto's own unit tests; this probably won't + # actually get hit in normal use. + if isinstance(new_value, six.text_type): + new_value = new_value.encode(DEFAULT_TEXT_ENCODING) + self._value_buffer.write(new_value) + + def copy(self, new_name=None, new_is_versioned=None): r = copy.deepcopy(self) if new_name is not None: r.name = new_name + if new_is_versioned is not None: + r._is_versioned = new_is_versioned + r.refresh_version() return r def set_metadata(self, metadata, replace=False): @@ -63,36 +117,43 @@ class FakeKey(BaseModel): def set_tagging(self, tagging): self._tagging = tagging - def set_storage_class(self, storage_class): - self._storage_class = storage_class + def set_storage_class(self, storage): + if storage is not None and storage not in STORAGE_CLASS: + raise InvalidStorageClass(storage=storage) + self._storage_class = storage def set_acl(self, acl): self.acl = acl def append_to_value(self, value): - self.value += value + self._value_buffer.seek(0, os.SEEK_END) + self._value_buffer.write(value) + self.last_modified = datetime.datetime.utcnow() self._etag = None # must recalculate etag if self._is_versioned: - self._version_id += 1 + self._version_id = str(uuid.uuid4()) else: - self._is_versioned = 0 + self._version_id = None def restore(self, days): self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days) - def increment_version(self): - self._version_id += 1 + def refresh_version(self): + self._version_id = str(uuid.uuid4()) + self.last_modified = datetime.datetime.utcnow() @property def etag(self): if self._etag is None: value_md5 = hashlib.md5() - if isinstance(self.value, six.text_type): - value = self.value.encode("utf-8") - else: - value = self.value - value_md5.update(value) + self._value_buffer.seek(0) + while True: + block = self._value_buffer.read(DEFAULT_KEY_BUFFER_SIZE) + if not block: + break + value_md5.update(block) + self._etag = value_md5.hexdigest() return '"{0}"'.format(self._etag) @@ -119,7 +180,7 @@ class FakeKey(BaseModel): res = { 'ETag': self.etag, 'last-modified': self.last_modified_RFC1123, - 'content-length': str(len(self.value)), + 'content-length': str(self.size), } if self._storage_class != 'STANDARD': res['x-amz-storage-class'] = self._storage_class @@ -137,7 +198,8 @@ class FakeKey(BaseModel): @property def size(self): - return len(self.value) + self._value_buffer.seek(0, os.SEEK_END) + return self._value_buffer.tell() @property def storage_class(self): @@ -148,6 +210,26 @@ class FakeKey(BaseModel): if self._expiry is not None: return self._expiry.strftime("%a, %d %b %Y %H:%M:%S GMT") + # Keys need to be pickleable due to some implementation details of boto3. + # Since file objects aren't pickleable, we need to override the default + # behavior. 
The following is adapted from the Python docs: + # https://docs.python.org/3/library/pickle.html#handling-stateful-objects + def __getstate__(self): + state = self.__dict__.copy() + state['value'] = self.value + del state['_value_buffer'] + return state + + def __setstate__(self, state): + self.__dict__.update({ + k: v for k, v in six.iteritems(state) + if k != 'value' + }) + + self._value_buffer = \ + tempfile.SpooledTemporaryFile(max_size=self._max_buffer_size) + self.value = state['value'] + class FakeMultipart(BaseModel): @@ -168,11 +250,14 @@ class FakeMultipart(BaseModel): count = 0 for pn, etag in body: part = self.parts.get(pn) - if part is None or part.etag != etag: + part_etag = None + if part is not None: + part_etag = part.etag.replace('"', '') + etag = etag.replace('"', '') + if part is None or part_etag != etag: raise InvalidPart() if last is not None and len(last.value) < UPLOAD_PART_MIN_SIZE: raise EntityTooSmall() - part_etag = part.etag.replace('"', '') md5s.extend(decode_hex(part_etag)[0]) total.extend(part.value) last = part @@ -270,7 +355,7 @@ def get_canned_acl(acl): grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ])) elif acl == 'public-read-write': grants.append(FakeGrant([ALL_USERS_GRANTEE], [ - PERMISSION_READ, PERMISSION_WRITE])) + PERMISSION_READ, PERMISSION_WRITE])) elif acl == 'authenticated-read': grants.append( FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ])) @@ -282,7 +367,7 @@ def get_canned_acl(acl): pass # TODO: bucket owner, EC2 Read elif acl == 'log-delivery-write': grants.append(FakeGrant([LOG_DELIVERY_GRANTEE], [ - PERMISSION_READ_ACP, PERMISSION_WRITE])) + PERMISSION_READ_ACP, PERMISSION_WRITE])) else: assert False, 'Unknown canned acl: %s' % (acl,) return FakeAcl(grants=grants) @@ -307,19 +392,41 @@ class FakeTag(BaseModel): self.value = value +class LifecycleFilter(BaseModel): + + def __init__(self, prefix=None, tag=None, and_filter=None): + self.prefix = prefix or '' + self.tag = tag + self.and_filter = and_filter + + +class LifecycleAndFilter(BaseModel): + + def __init__(self, prefix=None, tags=None): + self.prefix = prefix or '' + self.tags = tags + + class LifecycleRule(BaseModel): - def __init__(self, id=None, prefix=None, status=None, expiration_days=None, - expiration_date=None, transition_days=None, - transition_date=None, storage_class=None): + def __init__(self, id=None, prefix=None, lc_filter=None, status=None, expiration_days=None, + expiration_date=None, transition_days=None, transition_date=None, storage_class=None, + expired_object_delete_marker=None, nve_noncurrent_days=None, nvt_noncurrent_days=None, + nvt_storage_class=None, aimu_days=None): self.id = id self.prefix = prefix + self.filter = lc_filter self.status = status self.expiration_days = expiration_days self.expiration_date = expiration_date self.transition_days = transition_days self.transition_date = transition_date self.storage_class = storage_class + self.expired_object_delete_marker = expired_object_delete_marker + self.nve_noncurrent_days = nve_noncurrent_days + self.nvt_noncurrent_days = nvt_noncurrent_days + self.nvt_storage_class = nvt_storage_class + self.aimu_days = aimu_days class CorsRule(BaseModel): @@ -333,6 +440,26 @@ class CorsRule(BaseModel): self.max_age_seconds = max_age_seconds +class Notification(BaseModel): + + def __init__(self, arn, events, filters=None, id=None): + self.id = id if id else ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(50)) + self.arn = arn + self.events = events + self.filters = 
filters if filters else {} + + +class NotificationConfiguration(BaseModel): + + def __init__(self, topic=None, queue=None, cloud_function=None): + self.topic = [Notification(t["Topic"], t["Event"], filters=t.get("Filter"), id=t.get("Id")) for t in topic] \ + if topic else [] + self.queue = [Notification(q["Queue"], q["Event"], filters=q.get("Filter"), id=q.get("Id")) for q in queue] \ + if queue else [] + self.cloud_function = [Notification(c["CloudFunction"], c["Event"], filters=c.get("Filter"), id=c.get("Id")) + for c in cloud_function] if cloud_function else [] + + class FakeBucket(BaseModel): def __init__(self, name, region_name): @@ -347,6 +474,9 @@ class FakeBucket(BaseModel): self.acl = get_canned_acl('private') self.tags = FakeTagging() self.cors = [] + self.logging = {} + self.notification_configuration = None + self.accelerate_configuration = None @property def location(self): @@ -359,25 +489,88 @@ class FakeBucket(BaseModel): def set_lifecycle(self, rules): self.rules = [] for rule in rules: + # Extract and validate actions from Lifecycle rule expiration = rule.get('Expiration') transition = rule.get('Transition') + + nve_noncurrent_days = None + if rule.get('NoncurrentVersionExpiration') is not None: + if rule["NoncurrentVersionExpiration"].get('NoncurrentDays') is None: + raise MalformedXML() + nve_noncurrent_days = rule["NoncurrentVersionExpiration"]["NoncurrentDays"] + + nvt_noncurrent_days = None + nvt_storage_class = None + if rule.get('NoncurrentVersionTransition') is not None: + if rule["NoncurrentVersionTransition"].get('NoncurrentDays') is None: + raise MalformedXML() + if rule["NoncurrentVersionTransition"].get('StorageClass') is None: + raise MalformedXML() + nvt_noncurrent_days = rule["NoncurrentVersionTransition"]["NoncurrentDays"] + nvt_storage_class = rule["NoncurrentVersionTransition"]["StorageClass"] + + aimu_days = None + if rule.get('AbortIncompleteMultipartUpload') is not None: + if rule["AbortIncompleteMultipartUpload"].get('DaysAfterInitiation') is None: + raise MalformedXML() + aimu_days = rule["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] + + eodm = None + if expiration and expiration.get("ExpiredObjectDeleteMarker") is not None: + # This cannot be set if Date or Days is set: + if expiration.get("Days") or expiration.get("Date"): + raise MalformedXML() + eodm = expiration["ExpiredObjectDeleteMarker"] + + # Pull out the filter: + lc_filter = None + if rule.get("Filter"): + # Can't have both `Filter` and `Prefix` (need to check for the presence of the key): + try: + if rule["Prefix"] or not rule["Prefix"]: + raise MalformedXML() + except KeyError: + pass + + and_filter = None + if rule["Filter"].get("And"): + and_tags = [] + if rule["Filter"]["And"].get("Tag"): + if not isinstance(rule["Filter"]["And"]["Tag"], list): + rule["Filter"]["And"]["Tag"] = [rule["Filter"]["And"]["Tag"]] + + for t in rule["Filter"]["And"]["Tag"]: + and_tags.append(FakeTag(t["Key"], t.get("Value", ''))) + + and_filter = LifecycleAndFilter(prefix=rule["Filter"]["And"]["Prefix"], tags=and_tags) + + filter_tag = None + if rule["Filter"].get("Tag"): + filter_tag = FakeTag(rule["Filter"]["Tag"]["Key"], rule["Filter"]["Tag"].get("Value", '')) + + lc_filter = LifecycleFilter(prefix=rule["Filter"]["Prefix"], tag=filter_tag, and_filter=and_filter) + self.rules.append(LifecycleRule( id=rule.get('ID'), prefix=rule.get('Prefix'), + lc_filter=lc_filter, status=rule['Status'], expiration_days=expiration.get('Days') if expiration else None, expiration_date=expiration.get('Date') if 
expiration else None, transition_days=transition.get('Days') if transition else None, transition_date=transition.get('Date') if transition else None, - storage_class=transition[ - 'StorageClass'] if transition else None, + storage_class=transition.get('StorageClass') if transition else None, + expired_object_delete_marker=eodm, + nve_noncurrent_days=nve_noncurrent_days, + nvt_noncurrent_days=nvt_noncurrent_days, + nvt_storage_class=nvt_storage_class, + aimu_days=aimu_days, )) def delete_lifecycle(self): self.rules = [] def set_cors(self, rules): - from moto.s3.exceptions import InvalidRequest, MalformedXML self.cors = [] if len(rules) > 100: @@ -422,6 +615,65 @@ class FakeBucket(BaseModel): def tagging(self): return self.tags + def set_logging(self, logging_config, bucket_backend): + if not logging_config: + self.logging = {} + return + + # Target bucket must exist in the same account (assuming all moto buckets are in the same account): + if not bucket_backend.buckets.get(logging_config["TargetBucket"]): + raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.") + + # Does the target bucket have the log-delivery WRITE and READ_ACP permissions? + write = read_acp = False + for grant in bucket_backend.buckets[logging_config["TargetBucket"]].acl.grants: + # Must be granted to: http://acs.amazonaws.com/groups/s3/LogDelivery + for grantee in grant.grantees: + if grantee.uri == "http://acs.amazonaws.com/groups/s3/LogDelivery": + if "WRITE" in grant.permissions or "FULL_CONTROL" in grant.permissions: + write = True + + if "READ_ACP" in grant.permissions or "FULL_CONTROL" in grant.permissions: + read_acp = True + + break + + if not write or not read_acp: + raise InvalidTargetBucketForLogging("You must give the log-delivery group WRITE and READ_ACP" + " permissions to the target bucket") + + # Buckets must also exist within the same region: + if bucket_backend.buckets[logging_config["TargetBucket"]].region_name != self.region_name: + raise CrossLocationLoggingProhibitted() + + # Checks pass -- set the logging config: + self.logging = logging_config + + def set_notification_configuration(self, notification_config): + if not notification_config: + self.notification_configuration = None + return + + self.notification_configuration = NotificationConfiguration( + topic=notification_config.get("TopicConfiguration"), + queue=notification_config.get("QueueConfiguration"), + cloud_function=notification_config.get("CloudFunctionConfiguration") + ) + + # Validate that the region is correct: + for thing in ["topic", "queue", "cloud_function"]: + for t in getattr(self.notification_configuration, thing): + region = t.arn.split(":")[3] + if region != self.region_name: + raise InvalidNotificationDestination() + + def set_accelerate_configuration(self, accelerate_config): + if self.accelerate_configuration is None and accelerate_config == 'Suspended': + # Cannot "suspend" a not active acceleration. 
Leaves it undefined + return + + self.accelerate_configuration = accelerate_config + def set_website_configuration(self, website_configuration): self.website_configuration = website_configuration @@ -457,6 +709,8 @@ class S3Backend(BaseBackend): def create_bucket(self, bucket_name, region_name): if bucket_name in self.buckets: raise BucketAlreadyExists(bucket=bucket_name) + if not MIN_BUCKET_NAME_LENGTH <= len(bucket_name) <= MAX_BUCKET_NAME_LENGTH: + raise InvalidBucketName() new_bucket = FakeBucket(name=bucket_name, region_name=region_name) self.buckets[bucket_name] = new_bucket return new_bucket @@ -486,20 +740,18 @@ class S3Backend(BaseBackend): def get_bucket_latest_versions(self, bucket_name): versions = self.get_bucket_versions(bucket_name) - maximum_version_per_key = {} + latest_modified_per_key = {} latest_versions = {} for version in versions: - if isinstance(version, FakeDeleteMarker): - name = version.key.name - else: - name = version.name + name = version.name + last_modified = version.last_modified version_id = version.version_id - maximum_version_per_key[name] = max( - version_id, - maximum_version_per_key.get(name, -1) + latest_modified_per_key[name] = max( + last_modified, + latest_modified_per_key.get(name, datetime.datetime.min) ) - if version_id == maximum_version_per_key[name]: + if last_modified == latest_modified_per_key[name]: latest_versions[name] = version_id return latest_versions @@ -540,25 +792,36 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) return bucket.website_configuration - def set_key(self, bucket_name, key_name, value, storage=None, etag=None): + def set_key( + self, + bucket_name, + key_name, + value, + storage=None, + etag=None, + multipart=None, + ): key_name = clean_key_name(key_name) + if storage is not None and storage not in STORAGE_CLASS: + raise InvalidStorageClass(storage=storage) bucket = self.get_bucket(bucket_name) - old_key = bucket.keys.get(key_name, None) - if old_key is not None and bucket.is_versioned: - new_version_id = old_key._version_id + 1 - else: - new_version_id = 0 - new_key = FakeKey( name=key_name, value=value, storage=storage, etag=etag, is_versioned=bucket.is_versioned, - version_id=new_version_id) - bucket.keys[key_name] = new_key + version_id=str(uuid.uuid4()) if bucket.is_versioned else None, + multipart=multipart, + ) + + keys = [ + key for key in bucket.keys.getlist(key_name, []) + if key.version_id != new_key.version_id + ] + [new_key] + bucket.keys.setlist(key_name, keys) return new_key @@ -569,7 +832,7 @@ class S3Backend(BaseBackend): key.append_to_value(value) return key - def get_key(self, bucket_name, key_name, version_id=None): + def get_key(self, bucket_name, key_name, version_id=None, part_number=None): key_name = clean_key_name(key_name) bucket = self.get_bucket(bucket_name) key = None @@ -579,11 +842,14 @@ class S3Backend(BaseBackend): if key_name in bucket.keys: key = bucket.keys[key_name] else: - for key_version in bucket.keys.getlist(key_name): + for key_version in bucket.keys.getlist(key_name, default=[]): if str(key_version.version_id) == str(version_id): key = key_version break + if part_number and key.multipart: + key = key.multipart.parts[part_number] + if isinstance(key, FakeKey): return key else: @@ -597,6 +863,9 @@ class S3Backend(BaseBackend): return key def put_bucket_tagging(self, bucket_name, tagging): + tag_keys = [tag.key for tag in tagging.tag_set.tags] + if len(tag_keys) != len(set(tag_keys)): + raise DuplicateTagKeys() bucket = self.get_bucket(bucket_name) 
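A note on the DuplicateTagKeys guard just added: it is easiest to see from the client side. A minimal sketch using the mock_s3 decorator (bucket and tag names are illustrative, and the surfaced error code is whatever DuplicateTagKeys maps to):

    import boto3
    from botocore.exceptions import ClientError
    from moto import mock_s3

    @mock_s3
    def tagging_rejects_duplicate_keys():
        client = boto3.client('s3', region_name='us-east-1')
        client.create_bucket(Bucket='example-bucket')
        try:
            client.put_bucket_tagging(
                Bucket='example-bucket',
                Tagging={'TagSet': [
                    {'Key': 'env', 'Value': 'dev'},
                    {'Key': 'env', 'Value': 'prod'},  # duplicate key -> rejected
                ]},
            )
        except ClientError as err:
            # Expected: the backend refuses the duplicate 'env' key
            print(err.response['Error']['Code'])

    tagging_rejects_duplicate_keys()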
bucket.set_tags(tagging) @@ -608,10 +877,27 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.set_cors(cors_rules) + def put_bucket_logging(self, bucket_name, logging_config): + bucket = self.get_bucket(bucket_name) + bucket.set_logging(logging_config, self) + def delete_bucket_cors(self, bucket_name): bucket = self.get_bucket(bucket_name) bucket.delete_cors() + def put_bucket_notification_configuration(self, bucket_name, notification_config): + bucket = self.get_bucket(bucket_name) + bucket.set_notification_configuration(notification_config) + + def put_bucket_accelerate_configuration(self, bucket_name, accelerate_configuration): + if accelerate_configuration not in ['Enabled', 'Suspended']: + raise MalformedXML() + + bucket = self.get_bucket(bucket_name) + if bucket.name.find('.') != -1: + raise InvalidRequest('PutBucketAccelerateConfiguration') + bucket.set_accelerate_configuration(accelerate_configuration) + def initiate_multipart(self, bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) @@ -627,7 +913,12 @@ class S3Backend(BaseBackend): return del bucket.multiparts[multipart_id] - key = self.set_key(bucket_name, multipart.key_name, value, etag=etag) + key = self.set_key( + bucket_name, + multipart.key_name, + value, etag=etag, + multipart=multipart + ) key.set_metadata(multipart.metadata) return key @@ -649,12 +940,11 @@ class S3Backend(BaseBackend): return multipart.set_part(part_id, value) def copy_part(self, dest_bucket_name, multipart_id, part_id, - src_bucket_name, src_key_name, start_byte, end_byte): - src_key_name = clean_key_name(src_key_name) - src_bucket = self.get_bucket(src_bucket_name) + src_bucket_name, src_key_name, src_version_id, start_byte, end_byte): dest_bucket = self.get_bucket(dest_bucket_name) multipart = dest_bucket.multiparts[multipart_id] - src_value = src_bucket.keys[src_key_name].value + + src_value = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id).value if start_byte is not None: src_value = src_value[start_byte:end_byte + 1] return multipart.set_part(part_id, src_value) @@ -683,6 +973,7 @@ class S3Backend(BaseBackend): else: key_results.add(key) + key_results = filter(lambda key: not isinstance(key, FakeDeleteMarker), key_results) key_results = sorted(key_results, key=lambda key: key.name) folder_results = [folder_name for folder_name in sorted( folder_results, key=lambda key: key)] @@ -716,6 +1007,9 @@ class S3Backend(BaseBackend): if str(key.version_id) != str(version_id) ] ) + + if not bucket.keys.getlist(key_name): + bucket.keys.pop(key_name) return True except KeyError: return False @@ -727,17 +1021,15 @@ class S3Backend(BaseBackend): dest_bucket = self.get_bucket(dest_bucket_name) key = self.get_key(src_bucket_name, src_key_name, version_id=src_version_id) - if dest_key_name != src_key_name: - key = key.copy(dest_key_name) - dest_bucket.keys[dest_key_name] = key - # By this point, the destination key must exist, or KeyError - if dest_bucket.is_versioned: - dest_bucket.keys[dest_key_name].increment_version() + new_key = key.copy(dest_key_name, dest_bucket.is_versioned) + if storage is not None: - key.set_storage_class(storage) + new_key.set_storage_class(storage) if acl is not None: - key.set_acl(acl) + new_key.set_acl(acl) + + dest_bucket.keys[dest_key_name] = new_key def set_bucket_acl(self, bucket_name, acl): bucket = self.get_bucket(bucket_name) diff --git a/moto/s3/responses.py b/moto/s3/responses.py old mode 100755 new mode 
100644 index fb1735a5c..6ba7a52c6 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -4,22 +4,25 @@ import re import six from moto.core.utils import str_to_rfc_1123_datetime -from six.moves.urllib.parse import parse_qs, urlparse +from six.moves.urllib.parse import parse_qs, urlparse, unquote import xmltodict from moto.packages.httpretty.core import HTTPrettyRequest from moto.core.responses import _TemplateEnvironmentMixin +from moto.core.utils import path_url -from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys +from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, \ + parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys - -from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder -from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, FakeTag -from .utils import bucket_name_from_url, metadata_from_headers +from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder, MalformedXML, \ + MalformedACLError, InvalidNotificationARN, InvalidNotificationEvent +from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \ + FakeTag +from .utils import bucket_name_from_url, clean_key_name, metadata_from_headers, parse_region_from_url from xml.dom import minidom -REGION_URL_REGEX = r'\.s3-(.+?)\.amazonaws\.com' + DEFAULT_REGION_NAME = 'us-east-1' @@ -55,8 +58,11 @@ class ResponseObject(_TemplateEnvironmentMixin): if not host: host = urlparse(request.url).netloc - if not host or host.startswith("localhost") or re.match(r"^[^.]+$", host): - # For localhost or local domain names, default to path-based buckets + if (not host or host.startswith('localhost') or host.startswith('localstack') or + re.match(r'^[^.]+$', host) or re.match(r'^.*\.svc\.cluster\.local$', host)): + # Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev), + # (3) local host names that do not contain a "." 
(e.g., Docker container host names), or + # (4) kubernetes host names return False match = re.match(r'^([^\[\]:]+)(:\d+)?$', host) @@ -68,8 +74,9 @@ class ResponseObject(_TemplateEnvironmentMixin): match = re.match(r'^\[(.+)\](:\d+)?$', host) if match: - match = re.match(r'^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z', - match.groups()[0], re.IGNORECASE) + match = re.match( + r'^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z', + match.groups()[0], re.IGNORECASE) if match: return False @@ -123,10 +130,7 @@ class ResponseObject(_TemplateEnvironmentMixin): parsed_url = urlparse(full_url) querystring = parse_qs(parsed_url.query, keep_blank_values=True) method = request.method - region_name = DEFAULT_REGION_NAME - region_match = re.search(REGION_URL_REGEX, full_url) - if region_match: - region_name = region_match.groups()[0] + region_name = parse_region_from_url(full_url) bucket_name = self.parse_bucket_name_from_url(request, full_url) if not bucket_name: @@ -167,7 +171,7 @@ class ResponseObject(_TemplateEnvironmentMixin): # HEAD (which the real API responds with), and instead # raises NoSuchBucket, leading to inconsistency in # error response between real and mocked responses. - return 404, {}, "Not Found" + return 404, {}, "" return 200, {}, "" def _bucket_response_get(self, bucket_name, querystring, headers): @@ -189,7 +193,13 @@ class ResponseObject(_TemplateEnvironmentMixin): elif 'location' in querystring: bucket = self.backend.get_bucket(bucket_name) template = self.response_template(S3_BUCKET_LOCATION) - return template.render(location=bucket.location) + + location = bucket.location + # us-east-1 is different - returns a None location + if location == DEFAULT_REGION_NAME: + location = None + + return template.render(location=location) elif 'lifecycle' in querystring: bucket = self.backend.get_bucket(bucket_name) if not bucket.rules: @@ -227,6 +237,13 @@ class ResponseObject(_TemplateEnvironmentMixin): return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_TAGGING_RESPONSE) return template.render(bucket=bucket) + elif 'logging' in querystring: + bucket = self.backend.get_bucket(bucket_name) + if not bucket.logging: + template = self.response_template(S3_NO_LOGGING_CONFIG) + return 200, {}, template.render() + template = self.response_template(S3_LOGGING_CONFIG) + return 200, {}, template.render(logging=bucket.logging) elif "cors" in querystring: bucket = self.backend.get_bucket(bucket_name) if len(bucket.cors) == 0: @@ -234,6 +251,20 @@ class ResponseObject(_TemplateEnvironmentMixin): return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_CORS_RESPONSE) return template.render(bucket=bucket) + elif "notification" in querystring: + bucket = self.backend.get_bucket(bucket_name) + if not bucket.notification_configuration: + return 200, {}, "" + template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG) + return template.render(bucket=bucket) + elif "accelerate" in querystring: + bucket = self.backend.get_bucket(bucket_name) + if bucket.accelerate_configuration is None: + template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET) + return 200, {}, template.render() + template = self.response_template(S3_BUCKET_ACCELERATE) + return template.render(bucket=bucket) + elif 'versions' in 
querystring: delimiter = querystring.get('delimiter', [None])[0] encoding_type = querystring.get('encoding-type', [None])[0] @@ -320,10 +351,15 @@ class ResponseObject(_TemplateEnvironmentMixin): if continuation_token or start_after: limit = continuation_token or start_after - result_keys = self._get_results_from_token(result_keys, limit) + if not delimiter: + result_keys = self._get_results_from_token(result_keys, limit) + else: + result_folders = self._get_results_from_token(result_folders, limit) - result_keys, is_truncated, \ - next_continuation_token = self._truncate_result(result_keys, max_keys) + if not delimiter: + result_keys, is_truncated, next_continuation_token = self._truncate_result(result_keys, max_keys) + else: + result_folders, is_truncated, next_continuation_token = self._truncate_result(result_folders, max_keys) return template.render( bucket=bucket, @@ -341,7 +377,7 @@ class ResponseObject(_TemplateEnvironmentMixin): def _get_results_from_token(self, result_keys, token): continuation_index = 0 for key in result_keys: - if key.name > token: + if (key.name if isinstance(key, FakeKey) else key) > token: break continuation_index += 1 return result_keys[continuation_index:] @@ -350,7 +386,8 @@ class ResponseObject(_TemplateEnvironmentMixin): if len(result_keys) > max_keys: is_truncated = 'true' result_keys = result_keys[:max_keys] - next_continuation_token = result_keys[-1].name + item = result_keys[-1] + next_continuation_token = (item.name if isinstance(item, FakeKey) else item) else: is_truncated = 'false' next_continuation_token = None @@ -378,8 +415,11 @@ class ResponseObject(_TemplateEnvironmentMixin): self.backend.set_bucket_policy(bucket_name, body) return 'True' elif 'acl' in querystring: - # TODO: Support the XML-based ACL format - self.backend.set_bucket_acl(bucket_name, self._acl_from_headers(request.headers)) + # Headers are first. 
If not set, then look at the body (consistent with the documentation): + acls = self._acl_from_headers(request.headers) + if not acls: + acls = self._acl_from_xml(body) + self.backend.set_bucket_acl(bucket_name, acls) return "" elif "tagging" in querystring: tagging = self._bucket_tagging_from_xml(body) @@ -389,16 +429,51 @@ class ResponseObject(_TemplateEnvironmentMixin): self.backend.set_bucket_website_configuration(bucket_name, body) return "" elif "cors" in querystring: - from moto.s3.exceptions import MalformedXML try: self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body)) return "" except KeyError: raise MalformedXML() + elif "logging" in querystring: + try: + self.backend.put_bucket_logging(bucket_name, self._logging_from_xml(body)) + return "" + except KeyError: + raise MalformedXML() + elif "notification" in querystring: + try: + self.backend.put_bucket_notification_configuration(bucket_name, + self._notification_config_from_xml(body)) + return "" + except KeyError: + raise MalformedXML() + except Exception as e: + raise e + elif "accelerate" in querystring: + try: + accelerate_status = self._accelerate_config_from_xml(body) + self.backend.put_bucket_accelerate_configuration(bucket_name, accelerate_status) + return "" + except KeyError: + raise MalformedXML() + except Exception as e: + raise e + else: if body: + # us-east-1, the default AWS region behaves a bit differently + # - you should not use it as a location constraint --> it fails + # - querying the location constraint returns None try: - region_name = xmltodict.parse(body)['CreateBucketConfiguration']['LocationConstraint'] + forced_region = xmltodict.parse(body)['CreateBucketConfiguration']['LocationConstraint'] + + if forced_region == DEFAULT_REGION_NAME: + raise S3ClientError( + 'InvalidLocationConstraint', + 'The specified location-constraint is not valid' + ) + else: + region_name = forced_region except KeyError: pass @@ -453,7 +528,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if isinstance(request, HTTPrettyRequest): path = request.path else: - path = request.full_path if hasattr(request, 'full_path') else request.path_url + path = request.full_path if hasattr(request, 'full_path') else path_url(request.url) if self.is_delete_keys(request, path, bucket_name): return self._bucket_response_delete_keys(request, body, bucket_name, headers) @@ -513,6 +588,7 @@ class ResponseObject(_TemplateEnvironmentMixin): def toint(i): return int(i) if i else None + begin, end = map(toint, rspec.split('-')) if begin is not None: # byte range end = last if end is None else min(end, last) @@ -581,7 +657,7 @@ class ResponseObject(_TemplateEnvironmentMixin): body = b'' if method == 'GET': - return self._key_response_get(bucket_name, query, key_name, headers) + return self._key_response_get(bucket_name, query, key_name, headers=request.headers) elif method == 'PUT': return self._key_response_put(request, body, bucket_name, query, key_name, headers) elif method == 'HEAD': @@ -608,10 +684,15 @@ class ResponseObject(_TemplateEnvironmentMixin): parts=parts ) version_id = query.get('versionId', [None])[0] + if_modified_since = headers.get('If-Modified-Since', None) key = self.backend.get_key( bucket_name, key_name, version_id=version_id) if key is None: raise MissingKey(key_name) + if if_modified_since: + if_modified_since = str_to_rfc_1123_datetime(if_modified_since) + if if_modified_since and key.last_modified < if_modified_since: + return 304, response_headers, 'Not Modified' if 'acl' in query: template = 
self.response_template(S3_OBJECT_ACL_RESPONSE) + return 200, response_headers, template.render(obj=key) @@ -629,8 +710,10 @@ class ResponseObject(_TemplateEnvironmentMixin): upload_id = query['uploadId'][0] part_number = int(query['partNumber'][0]) if 'x-amz-copy-source' in request.headers: - src = request.headers.get("x-amz-copy-source").lstrip("/") + src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/") src_bucket, src_key = src.split("/", 1) + + src_key, src_version_id = src_key.split("?versionId=") if "?versionId=" in src_key else (src_key, None) src_range = request.headers.get( 'x-amz-copy-source-range', '').split("bytes=")[-1] @@ -640,9 +723,13 @@ except ValueError: start_byte, end_byte = None, None - key = self.backend.copy_part( - bucket_name, upload_id, part_number, src_bucket, - src_key, start_byte, end_byte) + if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + key = self.backend.copy_part( + bucket_name, upload_id, part_number, src_bucket, + src_key, src_version_id, start_byte, end_byte) + else: + return 404, response_headers, "" + template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE) response = template.render(part=key) else: @@ -671,12 +758,23 @@ if 'x-amz-copy-source' in request.headers: # Copy key - src_key_parsed = urlparse(request.headers.get("x-amz-copy-source")) - src_bucket, src_key = src_key_parsed.path.lstrip("/").split("/", 1) + # the copy source may include a url-quoted ?versionId=<id> suffix, so + # we need to parse the unquoted string first + src_key = clean_key_name(request.headers.get("x-amz-copy-source")) + if isinstance(src_key, six.binary_type): + src_key = src_key.decode('utf-8') + src_key_parsed = urlparse(src_key) + src_bucket, src_key = unquote(src_key_parsed.path).\ lstrip("/").split("/", 1) src_version_id = parse_qs(src_key_parsed.query).get( 'versionId', [None])[0] - self.backend.copy_key(src_bucket, src_key, bucket_name, key_name, - storage=storage_class, acl=acl, src_version_id=src_version_id) + + if self.backend.get_key(src_bucket, src_key, version_id=src_version_id): + self.backend.copy_key(src_bucket, src_key, bucket_name, key_name, - storage=storage_class, acl=acl, src_version_id=src_version_id) + storage=storage_class, acl=acl, src_version_id=src_version_id) + else: + return 404, response_headers, "" + new_key = self.backend.get_key(bucket_name, key_name) mdirective = request.headers.get('x-amz-metadata-directive') if mdirective is not None and mdirective == 'REPLACE': @@ -711,13 +809,20 @@ def _key_response_head(self, bucket_name, query, key_name, headers): response_headers = {} version_id = query.get('versionId', [None])[0] + part_number = query.get('partNumber', [None])[0] + if part_number: + part_number = int(part_number) if_modified_since = headers.get('If-Modified-Since', None) if if_modified_since: if_modified_since = str_to_rfc_1123_datetime(if_modified_since) key = self.backend.get_key( - bucket_name, key_name, version_id=version_id) + bucket_name, + key_name, + version_id=version_id, + part_number=part_number + ) if key: response_headers.update(key.metadata) response_headers.update(key.response_dict) @@ -729,6 +834,58 @@ else: return 404, response_headers, "" + def _acl_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + if not parsed_xml.get("AccessControlPolicy"): + raise MalformedACLError() + + # The owner is needed for some reason... 
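For reference, a sketch of the AccessControlPolicy document shape this parser expects, as seen through xmltodict (the owner ID and grantee values are made up):

    import xmltodict

    acl_xml = '''<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
      <Owner><ID>1234567890</ID></Owner>
      <AccessControlList>
        <Grant>
          <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser">
            <ID>1234567890</ID>
            <DisplayName>owner</DisplayName>
          </Grantee>
          <Permission>FULL_CONTROL</Permission>
        </Grant>
      </AccessControlList>
    </AccessControlPolicy>'''

    parsed = xmltodict.parse(acl_xml)
    # A single <Grant> parses to a dict; the code below wraps it in a list.
    grant = parsed['AccessControlPolicy']['AccessControlList']['Grant']
    assert grant['Grantee']['@xsi:type'] == 'CanonicalUser'
    assert grant['Permission'] == 'FULL_CONTROL'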
+ if not parsed_xml["AccessControlPolicy"].get("Owner"): + # TODO: Validate that the Owner is actually correct. + raise MalformedACLError() + + # If empty, then no ACLs: + if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None: + return [] + + if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"): + raise MalformedACLError() + + permissions = [ + "READ", + "WRITE", + "READ_ACP", + "WRITE_ACP", + "FULL_CONTROL" + ] + + if not isinstance(parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list): + parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = \ + [parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]] + + grants = self._get_grants_from_xml(parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], + MalformedACLError, permissions) + return FakeAcl(grants) + + def _get_grants_from_xml(self, grant_list, exception_type, permissions): + grants = [] + for grant in grant_list: + if grant.get("Permission", "") not in permissions: + raise exception_type() + + if grant["Grantee"].get("@xsi:type", "") not in ["CanonicalUser", "AmazonCustomerByEmail", "Group"]: + raise exception_type() + + # TODO: Verify that the proper grantee data is supplied based on the type. + + grants.append(FakeGrant( + [FakeGrantee(id=grant["Grantee"].get("ID", ""), display_name=grant["Grantee"].get("DisplayName", ""), + uri=grant["Grantee"].get("URI", ""))], + [grant["Permission"]]) + ) + + return grants + def _acl_from_headers(self, headers): canned_acl = headers.get('x-amz-acl', '') if canned_acl: @@ -812,6 +969,115 @@ class ResponseObject(_TemplateEnvironmentMixin): return [parsed_xml["CORSConfiguration"]["CORSRule"]] + def _logging_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"): + return {} + + if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"): + raise MalformedXML() + + if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"): + parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = "" + + # Get the ACLs: + if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"): + permissions = [ + "READ", + "WRITE", + "FULL_CONTROL" + ] + if not isinstance(parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"], list): + target_grants = self._get_grants_from_xml( + [parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"]], + MalformedXML, + permissions + ) + else: + target_grants = self._get_grants_from_xml( + parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"], + MalformedXML, + permissions + ) + + parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"] = target_grants + + return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"] + + def _notification_config_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + if not len(parsed_xml["NotificationConfiguration"]): + return {} + + # The types of notifications, and their required fields (apparently lambda is categorized by the API as + # "CloudFunction"): + notification_fields = [ + ("Topic", "sns"), + ("Queue", "sqs"), + ("CloudFunction", "lambda") + ] + + event_names = [ + 's3:ReducedRedundancyLostObject', + 's3:ObjectCreated:*', + 's3:ObjectCreated:Put', + 's3:ObjectCreated:Post', + 's3:ObjectCreated:Copy', + 's3:ObjectCreated:CompleteMultipartUpload', + 's3:ObjectRemoved:*', + 's3:ObjectRemoved:Delete', + 's3:ObjectRemoved:DeleteMarkerCreated' + ] + + 
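A short boto3 sketch that would exercise this validation path (the queue ARN and names are illustrative; only the arn:aws:sqs: prefix, the destination region, and the event names are checked here):

    import boto3
    from moto import mock_s3

    @mock_s3
    def configure_queue_notification():
        client = boto3.client('s3', region_name='us-east-1')
        client.create_bucket(Bucket='example-bucket')
        client.put_bucket_notification_configuration(
            Bucket='example-bucket',
            NotificationConfiguration={
                'QueueConfigurations': [{
                    'Id': 'send-to-queue',
                    'QueueArn': 'arn:aws:sqs:us-east-1:123456789012:example-queue',
                    'Events': ['s3:ObjectCreated:*'],
                    # FilterRules may only use the names 'prefix' and 'suffix'
                    'Filter': {'Key': {'FilterRules': [
                        {'Name': 'prefix', 'Value': 'images/'},
                    ]}},
                }],
            },
        )
        print(client.get_bucket_notification_configuration(Bucket='example-bucket'))

    configure_queue_notification()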
found_notifications = 0 # Tripwire -- if this is not ever set, then there were no notifications + for name, arn_string in notification_fields: + # 1st verify that the proper notification configuration has been passed in (with an ARN that is close + # to being correct -- nothing too complex in the ARN logic): + the_notification = parsed_xml["NotificationConfiguration"].get("{}Configuration".format(name)) + if the_notification: + found_notifications += 1 + if not isinstance(the_notification, list): + the_notification = parsed_xml["NotificationConfiguration"]["{}Configuration".format(name)] \ + = [the_notification] + + for n in the_notification: + if not n[name].startswith("arn:aws:{}:".format(arn_string)): + raise InvalidNotificationARN() + + # 2nd, verify that the Events list is correct: + assert n["Event"] + if not isinstance(n["Event"], list): + n["Event"] = [n["Event"]] + + for event in n["Event"]: + if event not in event_names: + raise InvalidNotificationEvent() + + # Parse out the filters: + if n.get("Filter"): + # Error if S3Key is blank: + if not n["Filter"]["S3Key"]: + raise KeyError() + + if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list): + n["Filter"]["S3Key"]["FilterRule"] = [n["Filter"]["S3Key"]["FilterRule"]] + + for filter_rule in n["Filter"]["S3Key"]["FilterRule"]: + assert filter_rule["Name"] in ["suffix", "prefix"] + assert filter_rule["Value"] + + if not found_notifications: + return {} + + return parsed_xml["NotificationConfiguration"] + + def _accelerate_config_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + config = parsed_xml['AccelerateConfiguration'] + return config['Status'] + def _key_response_delete(self, bucket_name, query, key_name, headers): if query.get('uploadId'): upload_id = query['uploadId'][0] @@ -978,14 +1244,37 @@ S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """ """ S3_BUCKET_LOCATION = """ -{{ location }}""" +{% if location != None %}{{ location }}{% endif %}""" S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {% for rule in rules %} {{ rule.id }} + {% if rule.filter %} + + {{ rule.filter.prefix }} + {% if rule.filter.tag %} + + {{ rule.filter.tag.key }} + {{ rule.filter.tag.value }} + + {% endif %} + {% if rule.filter.and_filter %} + + {{ rule.filter.and_filter.prefix }} + {% for tag in rule.filter.and_filter.tags %} + + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + + {% endif %} + + {% else %} {{ rule.prefix if rule.prefix != None }} + {% endif %} {{ rule.status }} {% if rule.storage_class %} @@ -998,7 +1287,7 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {{ rule.storage_class }} {% endif %} - {% if rule.expiration_days or rule.expiration_date %} + {% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %} {% if rule.expiration_days %} {{ rule.expiration_days }} @@ -1006,8 +1295,27 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {% if rule.expiration_date %} {{ rule.expiration_date }} {% endif %} + {% if rule.expired_object_delete_marker %} + {{ rule.expired_object_delete_marker }} + {% endif %} {% endif %} + {% if rule.nvt_noncurrent_days and rule.nvt_storage_class %} + + {{ rule.nvt_noncurrent_days }} + {{ rule.nvt_storage_class }} + + {% endif %} + {% if rule.nve_noncurrent_days %} + + {{ rule.nve_noncurrent_days }} + + {% endif %} + {% if rule.aimu_days %} + + {{ rule.aimu_days }} + + {% endif %} {% endfor %} @@ -1039,7 +1347,7 @@ S3_BUCKET_GET_VERSIONS = """ {% for key in key_list %} {{ key.name }} - {{ key.version_id }} + {% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %} {% if 
latest_versions[key.name] == key.version_id %}true{% else %}false{% endif %} {{ key.last_modified_ISO8601 }} {{ key.etag }} @@ -1053,10 +1361,10 @@ S3_BUCKET_GET_VERSIONS = """ {% endfor %} {% for marker in delete_marker_list %} - {{ marker.key.name }} + {{ marker.name }} {{ marker.version_id }} - {% if latest_versions[marker.key.name] == marker.version_id %}true{% else %}false{% endif %} - {{ marker.key.last_modified_ISO8601 }} + {% if latest_versions[marker.name] == marker.version_id %}true{% else %}false{% endif %} + {{ marker.last_modified_ISO8601 }} 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a webfile @@ -1213,7 +1521,7 @@ S3_MULTIPART_LIST_RESPONSE = """ STANDARD 1 - {{ count }} + {{ count }} {{ count }} false {% for part in parts %} @@ -1320,3 +1628,115 @@ S3_NO_CORS_CONFIG = """ 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= """ + +S3_LOGGING_CONFIG = """ + + + {{ logging["TargetBucket"] }} + {{ logging["TargetPrefix"] }} + {% if logging.get("TargetGrants") %} + + {% for grant in logging["TargetGrants"] %} + + + {% if grant.grantees[0].uri %} + {{ grant.grantees[0].uri }} + {% endif %} + {% if grant.grantees[0].id %} + {{ grant.grantees[0].id }} + {% endif %} + {% if grant.grantees[0].display_name %} + {{ grant.grantees[0].display_name }} + {% endif %} + + {{ grant.permissions[0] }} + + {% endfor %} + + {% endif %} + + +""" + +S3_NO_LOGGING_CONFIG = """ + +""" + +S3_GET_BUCKET_NOTIFICATION_CONFIG = """ + + {% for topic in bucket.notification_configuration.topic %} + + {{ topic.id }} + {{ topic.arn }} + {% for event in topic.events %} + {{ event }} + {% endfor %} + {% if topic.filters %} + + + {% for rule in topic.filters["S3Key"]["FilterRule"] %} + + {{ rule["Name"] }} + {{ rule["Value"] }} + + {% endfor %} + + + {% endif %} + + {% endfor %} + {% for queue in bucket.notification_configuration.queue %} + + {{ queue.id }} + {{ queue.arn }} + {% for event in queue.events %} + {{ event }} + {% endfor %} + {% if queue.filters %} + + + {% for rule in queue.filters["S3Key"]["FilterRule"] %} + + {{ rule["Name"] }} + {{ rule["Value"] }} + + {% endfor %} + + + {% endif %} + + {% endfor %} + {% for cf in bucket.notification_configuration.cloud_function %} + + {{ cf.id }} + {{ cf.arn }} + {% for event in cf.events %} + {{ event }} + {% endfor %} + {% if cf.filters %} + + + {% for rule in cf.filters["S3Key"]["FilterRule"] %} + + {{ rule["Name"] }} + {{ rule["Value"] }} + + {% endfor %} + + + {% endif %} + + {% endfor %} + +""" + +S3_BUCKET_ACCELERATE = """ + + {{ bucket.accelerate_configuration }} + +""" + +S3_BUCKET_ACCELERATE_NOT_SET = """ + +""" diff --git a/moto/s3/utils.py b/moto/s3/utils.py index a121eae3a..85a812aad 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals +import logging +import os from boto.s3.key import Key import re @@ -6,10 +8,16 @@ import six from six.moves.urllib.parse import urlparse, unquote import sys + +log = logging.getLogger(__name__) + + bucket_name_regex = re.compile("(.+).s3(.*).amazonaws.com") def bucket_name_from_url(url): + if os.environ.get('S3_IGNORE_SUBDOMAIN_BUCKETNAME', '') in ['1', 'true']: + return None domain = urlparse(url).netloc if domain.startswith('www.'): @@ -27,6 +35,20 @@ def bucket_name_from_url(url): return None +REGION_URL_REGEX = re.compile( + r'^https?://(s3[-\.](?P.+)\.amazonaws\.com/(.+)|' + r'(.+)\.s3-(?P.+)\.amazonaws\.com)/?') + + +def parse_region_from_url(url): + match = REGION_URL_REGEX.search(url) + if match: + region = 
match.group('region1') or match.group('region2') + else: + region = 'us-east-1' + return region + + def metadata_from_headers(headers): metadata = {} meta_regex = re.compile( diff --git a/moto/s3bucket_path/utils.py b/moto/s3bucket_path/utils.py index e10e64fb6..1b9a034f4 100644 --- a/moto/s3bucket_path/utils.py +++ b/moto/s3bucket_path/utils.py @@ -3,12 +3,12 @@ from six.moves.urllib.parse import urlparse def bucket_name_from_url(url): - pth = urlparse(url).path.lstrip("/") + path = urlparse(url).path.lstrip("/") - l = pth.lstrip("/").split("/") - if len(l) == 0 or l[0] == "": + parts = path.lstrip("/").split("/") + if len(parts) == 0 or parts[0] == "": return None - return l[0] + return parts[0] def parse_key_name(path): diff --git a/moto/secretsmanager/__init__.py b/moto/secretsmanager/__init__.py new file mode 100644 index 000000000..c7fbb2869 --- /dev/null +++ b/moto/secretsmanager/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import secretsmanager_backends +from ..core.models import base_decorator + +secretsmanager_backend = secretsmanager_backends['us-east-1'] +mock_secretsmanager = base_decorator(secretsmanager_backends) diff --git a/moto/secretsmanager/exceptions.py b/moto/secretsmanager/exceptions.py new file mode 100644 index 000000000..fa81b6d8b --- /dev/null +++ b/moto/secretsmanager/exceptions.py @@ -0,0 +1,44 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class SecretsManagerClientError(JsonRESTError): + code = 400 + + +class ResourceNotFoundException(SecretsManagerClientError): + def __init__(self): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "Secrets Manager can't find the specified secret" + ) + + +class ClientError(SecretsManagerClientError): + def __init__(self, message): + super(ClientError, self).__init__( + 'InvalidParameterValue', + message) + + +class InvalidParameterException(SecretsManagerClientError): + def __init__(self, message): + super(InvalidParameterException, self).__init__( + 'InvalidParameterException', + message) + + +class ResourceExistsException(SecretsManagerClientError): + def __init__(self, message): + super(ResourceExistsException, self).__init__( + 'ResourceExistsException', + message + ) + + +class InvalidRequestException(SecretsManagerClientError): + def __init__(self, message): + super(InvalidRequestException, self).__init__( + 'InvalidRequestException', + message) diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py new file mode 100644 index 000000000..3e0424b6b --- /dev/null +++ b/moto/secretsmanager/models.py @@ -0,0 +1,399 @@ +from __future__ import unicode_literals + +import time +import json +import uuid +import datetime + +import boto3 + +from moto.core import BaseBackend, BaseModel +from .exceptions import ( + ResourceNotFoundException, + InvalidParameterException, + ResourceExistsException, + InvalidRequestException, + ClientError +) +from .utils import random_password, secret_arn + + +class SecretsManager(BaseModel): + + def __init__(self, region_name, **kwargs): + self.region = region_name + + +class SecretsManagerBackend(BaseBackend): + + def __init__(self, region_name=None, **kwargs): + super(SecretsManagerBackend, self).__init__() + self.region = region_name + self.secrets = {} + + def reset(self): + region_name = self.region + self.__dict__ = {} + self.__init__(region_name) + + def _is_valid_identifier(self, identifier): + return identifier in self.secrets + + 
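This backend is reachable from tests through the new mock_secretsmanager decorator; a minimal round trip might look like this (secret name and value are illustrative):

    import boto3
    from moto import mock_secretsmanager

    @mock_secretsmanager
    def create_and_read_secret():
        client = boto3.client('secretsmanager', region_name='us-east-1')
        client.create_secret(Name='example-secret', SecretString='s3cr3t')
        # With no VersionId/VersionStage given, the AWSCURRENT version is returned.
        value = client.get_secret_value(SecretId='example-secret')
        print(value['SecretString'])

    create_and_read_secret()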
def _unix_time_secs(self, dt): + epoch = datetime.datetime.utcfromtimestamp(0) + return (dt - epoch).total_seconds() + + def get_secret_value(self, secret_id, version_id, version_stage): + + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException() + + if not version_id and version_stage: + # set version_id to match version_stage + versions_dict = self.secrets[secret_id]['versions'] + for ver_id, ver_val in versions_dict.items(): + if version_stage in ver_val['version_stages']: + version_id = ver_id + break + if not version_id: + raise ResourceNotFoundException() + + # TODO check this part + if 'deleted_date' in self.secrets[secret_id]: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \ + perform the operation on a secret that's currently marked deleted." + ) + + secret = self.secrets[secret_id] + version_id = version_id or secret['default_version_id'] + + secret_version = secret['versions'][version_id] + + response_data = { + "ARN": secret_arn(self.region, secret['secret_id']), + "Name": secret['name'], + "VersionId": secret_version['version_id'], + "VersionStages": secret_version['version_stages'], + "CreatedDate": secret_version['createdate'], + } + + if 'secret_string' in secret_version: + response_data["SecretString"] = secret_version['secret_string'] + + if 'secret_binary' in secret_version: + response_data["SecretBinary"] = secret_version['secret_binary'] + + response = json.dumps(response_data) + + return response + + def create_secret(self, name, secret_string=None, secret_binary=None, tags=[], **kwargs): + + # error if secret exists + if name in self.secrets.keys(): + raise ResourceExistsException('A resource with the ID you requested already exists.') + + version_id = self._add_secret(name, secret_string=secret_string, secret_binary=secret_binary, tags=tags) + + response = json.dumps({ + "ARN": secret_arn(self.region, name), + "Name": name, + "VersionId": version_id, + }) + + return response + + def _add_secret(self, secret_id, secret_string=None, secret_binary=None, tags=[], version_id=None, version_stages=None): + + if version_stages is None: + version_stages = ['AWSCURRENT'] + + if not version_id: + version_id = str(uuid.uuid4()) + + secret_version = { + 'createdate': int(time.time()), + 'version_id': version_id, + 'version_stages': version_stages, + } + + if secret_string is not None: + secret_version['secret_string'] = secret_string + + if secret_binary is not None: + secret_version['secret_binary'] = secret_binary + + if secret_id in self.secrets: + # remove all old AWSPREVIOUS stages + for secret_version_to_look_at in self.secrets[secret_id]['versions'].values(): + if 'AWSPREVIOUS' in secret_version_to_look_at['version_stages']: + secret_version_to_look_at['version_stages'].remove('AWSPREVIOUS') + + # set old AWSCURRENT secret to AWSPREVIOUS + previous_current_version_id = self.secrets[secret_id]['default_version_id'] + self.secrets[secret_id]['versions'][previous_current_version_id]['version_stages'] = ['AWSPREVIOUS'] + + self.secrets[secret_id]['versions'][version_id] = secret_version + self.secrets[secret_id]['default_version_id'] = version_id + else: + self.secrets[secret_id] = { + 'versions': { + version_id: secret_version + }, + 'default_version_id': version_id, + } + + secret = self.secrets[secret_id] + secret['secret_id'] = secret_id + secret['name'] = secret_id + secret['rotation_enabled'] = False + secret['rotation_lambda_arn'] = '' 
secret['auto_rotate_after_days'] = 0 + secret['tags'] = tags + + return version_id + + def put_secret_value(self, secret_id, secret_string, version_stages): + + version_id = self._add_secret(secret_id, secret_string, version_stages=version_stages) + + response = json.dumps({ + 'ARN': secret_arn(self.region, secret_id), + 'Name': secret_id, + 'VersionId': version_id, + 'VersionStages': version_stages + }) + + return response + + def describe_secret(self, secret_id): + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException + + secret = self.secrets[secret_id] + + response = json.dumps({ + "ARN": secret_arn(self.region, secret['secret_id']), + "Name": secret['name'], + "Description": "", + "KmsKeyId": "", + "RotationEnabled": secret['rotation_enabled'], + "RotationLambdaARN": secret['rotation_lambda_arn'], + "RotationRules": { + "AutomaticallyAfterDays": secret['auto_rotate_after_days'] + }, + "LastRotatedDate": None, + "LastChangedDate": None, + "LastAccessedDate": None, + "DeletedDate": secret.get('deleted_date', None), + "Tags": secret['tags'] + }) + + return response + + def rotate_secret(self, secret_id, client_request_token=None, + rotation_lambda_arn=None, rotation_rules=None): + + rotation_days = 'AutomaticallyAfterDays' + + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException + + if 'deleted_date' in self.secrets[secret_id]: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \ + perform the operation on a secret that's currently marked deleted." + ) + + if client_request_token: + token_length = len(client_request_token) + if token_length < 32 or token_length > 64: + msg = ( + 'ClientRequestToken ' + 'must be 32-64 characters long.' + ) + raise InvalidParameterException(msg) + + if rotation_lambda_arn: + if len(rotation_lambda_arn) > 2048: + msg = ( + 'RotationLambdaARN ' + 'must <= 2048 characters long.' + ) + raise InvalidParameterException(msg) + + if rotation_rules: + if rotation_days in rotation_rules: + rotation_period = rotation_rules[rotation_days] + if rotation_period < 1 or rotation_period > 1000: + msg = ( + 'RotationRules.AutomaticallyAfterDays ' + 'must be within 1-1000.' 
+ ) + raise InvalidParameterException(msg) + + secret = self.secrets[secret_id] + + old_secret_version = secret['versions'][secret['default_version_id']] + new_version_id = client_request_token or str(uuid.uuid4()) + + self._add_secret(secret_id, old_secret_version['secret_string'], secret['tags'], version_id=new_version_id, version_stages=['AWSCURRENT']) + + secret['rotation_lambda_arn'] = rotation_lambda_arn or '' + if rotation_rules: + secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0) + if secret['auto_rotate_after_days'] > 0: + secret['rotation_enabled'] = True + + if 'AWSCURRENT' in old_secret_version['version_stages']: + old_secret_version['version_stages'].remove('AWSCURRENT') + + response = json.dumps({ + "ARN": secret_arn(self.region, secret['secret_id']), + "Name": secret['name'], + "VersionId": new_version_id + }) + + return response + + def get_random_password(self, password_length, + exclude_characters, exclude_numbers, + exclude_punctuation, exclude_uppercase, + exclude_lowercase, include_space, + require_each_included_type): + # password size must have value less than or equal to 4096 + if password_length > 4096: + raise ClientError( + "ClientError: An error occurred (ValidationException) \ + when calling the GetRandomPassword operation: 1 validation error detected: Value '{}' at 'passwordLength' \ + failed to satisfy constraint: Member must have value less than or equal to 4096".format(password_length)) + if password_length < 4: + raise InvalidParameterException( + "InvalidParameterException: An error occurred (InvalidParameterException) \ + when calling the GetRandomPassword operation: Password length is too short based on the required types.") + + response = json.dumps({ + "RandomPassword": random_password(password_length, + exclude_characters, + exclude_numbers, + exclude_punctuation, + exclude_uppercase, + exclude_lowercase, + include_space, + require_each_included_type) + }) + + return response + + def list_secret_version_ids(self, secret_id): + secret = self.secrets[secret_id] + + version_list = [] + for version_id, version in secret['versions'].items(): + version_list.append({ + 'CreatedDate': int(time.time()), + 'LastAccessedDate': int(time.time()), + 'VersionId': version_id, + 'VersionStages': version['version_stages'], + }) + + response = json.dumps({ + 'ARN': secret['secret_id'], + 'Name': secret['name'], + 'NextToken': '', + 'Versions': version_list, + }) + + return response + + def list_secrets(self, max_results, next_token): + # TODO implement pagination and limits + + secret_list = [] + for secret in self.secrets.values(): + + versions_to_stages = {} + for version_id, version in secret['versions'].items(): + versions_to_stages[version_id] = version['version_stages'] + + secret_list.append({ + "ARN": secret_arn(self.region, secret['secret_id']), + "DeletedDate": secret.get('deleted_date', None), + "Description": "", + "KmsKeyId": "", + "LastAccessedDate": None, + "LastChangedDate": None, + "LastRotatedDate": None, + "Name": secret['name'], + "RotationEnabled": secret['rotation_enabled'], + "RotationLambdaARN": secret['rotation_lambda_arn'], + "RotationRules": { + "AutomaticallyAfterDays": secret['auto_rotate_after_days'] + }, + "SecretVersionsToStages": versions_to_stages, + "Tags": secret['tags'] + }) + + return secret_list, None + + def delete_secret(self, secret_id, recovery_window_in_days, force_delete_without_recovery): + + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException + + if 'deleted_date' in 
self.secrets[secret_id]: + raise InvalidRequestException( + "An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \ + perform the operation on a secret that's currently marked deleted." + ) + + if recovery_window_in_days and force_delete_without_recovery: + raise InvalidParameterException( + "An error occurred (InvalidParameterException) when calling the DeleteSecret operation: You can't \ + use ForceDeleteWithoutRecovery in conjunction with RecoveryWindowInDays." + ) + + if recovery_window_in_days and (recovery_window_in_days < 7 or recovery_window_in_days > 30): + raise InvalidParameterException( + "An error occurred (InvalidParameterException) when calling the DeleteSecret operation: The \ + RecoveryWindowInDays value must be between 7 and 30 days (inclusive)." + ) + + deletion_date = datetime.datetime.utcnow() + + if force_delete_without_recovery: + secret = self.secrets.pop(secret_id, None) + else: + deletion_date += datetime.timedelta(days=recovery_window_in_days or 30) + self.secrets[secret_id]['deleted_date'] = self._unix_time_secs(deletion_date) + secret = self.secrets.get(secret_id, None) + + if not secret: + raise ResourceNotFoundException + + arn = secret_arn(self.region, secret['secret_id']) + name = secret['name'] + + return arn, name, self._unix_time_secs(deletion_date) + + def restore_secret(self, secret_id): + + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException + + self.secrets[secret_id].pop('deleted_date', None) + + secret = self.secrets[secret_id] + + arn = secret_arn(self.region, secret['secret_id']) + name = secret['name'] + + return arn, name + + +available_regions = ( + boto3.session.Session().get_available_regions("secretsmanager") +) +secretsmanager_backends = {region: SecretsManagerBackend(region_name=region) + for region in available_regions} diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py new file mode 100644 index 000000000..090688351 --- /dev/null +++ b/moto/secretsmanager/responses.py @@ -0,0 +1,113 @@ +from __future__ import unicode_literals + +from moto.core.responses import BaseResponse + +from .models import secretsmanager_backends + +import json + + +class SecretsManagerResponse(BaseResponse): + + def get_secret_value(self): + secret_id = self._get_param('SecretId') + version_id = self._get_param('VersionId') + version_stage = self._get_param('VersionStage') + return secretsmanager_backends[self.region].get_secret_value( + secret_id=secret_id, + version_id=version_id, + version_stage=version_stage) + + def create_secret(self): + name = self._get_param('Name') + secret_string = self._get_param('SecretString') + secret_binary = self._get_param('SecretBinary') + tags = self._get_param('Tags', if_none=[]) + return secretsmanager_backends[self.region].create_secret( + name=name, + secret_string=secret_string, + secret_binary=secret_binary, + tags=tags + ) + + def get_random_password(self): + password_length = self._get_param('PasswordLength', if_none=32) + exclude_characters = self._get_param('ExcludeCharacters', if_none='') + exclude_numbers = self._get_param('ExcludeNumbers', if_none=False) + exclude_punctuation = self._get_param('ExcludePunctuation', if_none=False) + exclude_uppercase = self._get_param('ExcludeUppercase', if_none=False) + exclude_lowercase = self._get_param('ExcludeLowercase', if_none=False) + include_space = self._get_param('IncludeSpace', if_none=False) + require_each_included_type = self._get_param( + 'RequireEachIncludedType', 
if_none=True) + return secretsmanager_backends[self.region].get_random_password( + password_length=password_length, + exclude_characters=exclude_characters, + exclude_numbers=exclude_numbers, + exclude_punctuation=exclude_punctuation, + exclude_uppercase=exclude_uppercase, + exclude_lowercase=exclude_lowercase, + include_space=include_space, + require_each_included_type=require_each_included_type + ) + + def describe_secret(self): + secret_id = self._get_param('SecretId') + return secretsmanager_backends[self.region].describe_secret( + secret_id=secret_id + ) + + def rotate_secret(self): + client_request_token = self._get_param('ClientRequestToken') + rotation_lambda_arn = self._get_param('RotationLambdaARN') + rotation_rules = self._get_param('RotationRules') + secret_id = self._get_param('SecretId') + return secretsmanager_backends[self.region].rotate_secret( + secret_id=secret_id, + client_request_token=client_request_token, + rotation_lambda_arn=rotation_lambda_arn, + rotation_rules=rotation_rules + ) + + def put_secret_value(self): + secret_id = self._get_param('SecretId', if_none='') + secret_string = self._get_param('SecretString', if_none='') + version_stages = self._get_param('VersionStages', if_none=['AWSCURRENT']) + return secretsmanager_backends[self.region].put_secret_value( + secret_id=secret_id, + secret_string=secret_string, + version_stages=version_stages, + ) + + def list_secret_version_ids(self): + secret_id = self._get_param('SecretId', if_none='') + return secretsmanager_backends[self.region].list_secret_version_ids( + secret_id=secret_id + ) + + def list_secrets(self): + max_results = self._get_int_param("MaxResults") + next_token = self._get_param("NextToken") + secret_list, next_token = secretsmanager_backends[self.region].list_secrets( + max_results=max_results, + next_token=next_token, + ) + return json.dumps(dict(SecretList=secret_list, NextToken=next_token)) + + def delete_secret(self): + secret_id = self._get_param("SecretId") + recovery_window_in_days = self._get_param("RecoveryWindowInDays") + force_delete_without_recovery = self._get_param("ForceDeleteWithoutRecovery") + arn, name, deletion_date = secretsmanager_backends[self.region].delete_secret( + secret_id=secret_id, + recovery_window_in_days=recovery_window_in_days, + force_delete_without_recovery=force_delete_without_recovery, + ) + return json.dumps(dict(ARN=arn, Name=name, DeletionDate=deletion_date)) + + def restore_secret(self): + secret_id = self._get_param("SecretId") + arn, name = secretsmanager_backends[self.region].restore_secret( + secret_id=secret_id, + ) + return json.dumps(dict(ARN=arn, Name=name)) diff --git a/moto/secretsmanager/urls.py b/moto/secretsmanager/urls.py new file mode 100644 index 000000000..9e39e7263 --- /dev/null +++ b/moto/secretsmanager/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import SecretsManagerResponse + +url_bases = [ + "https?://secretsmanager.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': SecretsManagerResponse.dispatch, +} diff --git a/moto/secretsmanager/utils.py b/moto/secretsmanager/utils.py new file mode 100644 index 000000000..231fea296 --- /dev/null +++ b/moto/secretsmanager/utils.py @@ -0,0 +1,73 @@ +from __future__ import unicode_literals + +import random +import string +import six +import re + + +def random_password(password_length, exclude_characters, exclude_numbers, + exclude_punctuation, exclude_uppercase, exclude_lowercase, + include_space, require_each_included_type): + + password = '' + 
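The pool-building logic that follows is reached from clients via GetRandomPassword; a small usage sketch (parameter values are arbitrary, and the length must fall in the 4-4096 range enforced by the backend):

    import boto3
    from moto import mock_secretsmanager

    @mock_secretsmanager
    def generate_password():
        client = boto3.client('secretsmanager', region_name='us-east-1')
        resp = client.get_random_password(PasswordLength=20, ExcludePunctuation=True)
        # Punctuation never enters the character pool when excluded.
        print(resp['RandomPassword'])

    generate_password()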
required_characters = '' + + if not exclude_lowercase and not exclude_uppercase: + password += string.ascii_letters + required_characters += random.choice(_exclude_characters( + string.ascii_lowercase, exclude_characters)) + required_characters += random.choice(_exclude_characters( + string.ascii_uppercase, exclude_characters)) + elif not exclude_lowercase: + password += string.ascii_lowercase + required_characters += random.choice(_exclude_characters( + string.ascii_lowercase, exclude_characters)) + elif not exclude_uppercase: + password += string.ascii_uppercase + required_characters += random.choice(_exclude_characters( + string.ascii_uppercase, exclude_characters)) + if not exclude_numbers: + password += string.digits + required_characters += random.choice(_exclude_characters( + string.digits, exclude_characters)) + if not exclude_punctuation: + password += string.punctuation + required_characters += random.choice(_exclude_characters( + string.punctuation, exclude_characters)) + if include_space: + password += " " + required_characters += " " + + password = ''.join( + six.text_type(random.choice(password)) + for x in range(password_length)) + + if require_each_included_type: + password = _add_password_require_each_included_type( + password, required_characters) + + password = _exclude_characters(password, exclude_characters) + return password + + +def secret_arn(region, secret_id): + id_string = ''.join(random.choice(string.ascii_letters) for _ in range(5)) + return "arn:aws:secretsmanager:{0}:1234567890:secret:{1}-{2}".format( + region, secret_id, id_string) + + +def _exclude_characters(password, exclude_characters): + for c in exclude_characters: + if c in string.punctuation: + # Escape punctuation regex usage + c = "\{0}".format(c) + password = re.sub(c, '', str(password)) + return password + + +def _add_password_require_each_included_type(password, required_characters): + password_with_required_char = password[:-len(required_characters)] + password_with_required_char += required_characters + + return password_with_required_char diff --git a/moto/server.py b/moto/server.py index e9f4c0904..971589cac 100644 --- a/moto/server.py +++ b/moto/server.py @@ -21,6 +21,16 @@ from moto.core.utils import convert_flask_to_httpretty_response HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "HEAD", "PATCH"] +DEFAULT_SERVICE_REGION = ('s3', 'us-east-1') + +# Map of unsigned calls to service-region as per AWS API docs +# https://docs.aws.amazon.com/cognito/latest/developerguide/resource-permissions.html#amazon-cognito-signed-versus-unsigned-apis +UNSIGNED_REQUESTS = { + 'AWSCognitoIdentityService': ('cognito-identity', 'us-east-1'), + 'AWSCognitoIdentityProviderService': ('cognito-idp', 'us-east-1'), +} + + class DomainDispatcherApplication(object): """ Dispatch requests to different applications based on the "Host:" header @@ -34,6 +44,9 @@ class DomainDispatcherApplication(object): self.service = service def get_backend_for_host(self, host): + if host == 'moto_api': + return host + if self.service: return self.service @@ -47,6 +60,32 @@ class DomainDispatcherApplication(object): raise RuntimeError('Invalid host: "%s"' % host) + def infer_service_region(self, environ): + auth = environ.get('HTTP_AUTHORIZATION') + if auth: + # Signed request + # Parse auth header to find service assuming a SigV4 request + # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html + # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request'] + try: + credential_scope = 
auth.split(",")[0].split()[1] + _, _, region, service, _ = credential_scope.split("/") + return service, region + except ValueError: + # Signature format does not match, this is exceptional and we can't + # infer a service-region. A reduced set of services still use + # the deprecated SigV2, ergo prefer S3 as most likely default. + # https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html + return DEFAULT_SERVICE_REGION + else: + # Unsigned request + target = environ.get('HTTP_X_AMZ_TARGET') + if target: + service, _ = target.split('.', 1) + return UNSIGNED_REQUESTS.get(service, DEFAULT_SERVICE_REGION) + # S3 is the last resort when the target is also unknown + return DEFAULT_SERVICE_REGION + def get_application(self, environ): path_info = environ.get('PATH_INFO', '') @@ -63,19 +102,15 @@ class DomainDispatcherApplication(object): else: host = environ['HTTP_HOST'].split(':')[0] if host in {'localhost', 'motoserver'} or host.startswith("192.168."): - # Fall back to parsing auth header to find service - # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request'] - try: - _, _, region, service, _ = environ['HTTP_AUTHORIZATION'].split(",")[0].split()[ - 1].split("/") - except (KeyError, ValueError): - region = 'us-east-1' - service = 's3' + service, region = self.infer_service_region(environ) if service == 'dynamodb': - dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0] - # If Newer API version, use dynamodb2 - if dynamo_api_version > "20111205": - host = "dynamodb2" + if environ['HTTP_X_AMZ_TARGET'].startswith('DynamoDBStreams'): + host = 'dynamodbstreams' + else: + dynamo_api_version = environ['HTTP_X_AMZ_TARGET'].split("_")[1].split(".")[0] + # If Newer API version, use dynamodb2 + if dynamo_api_version > "20111205": + host = "dynamodb2" else: host = "{service}.{region}.amazonaws.com".format( service=service, region=region) @@ -186,9 +221,17 @@ def main(argv=sys.argv[1:]): parser.add_argument( '-s', '--ssl', action='store_true', - help='Enable SSL encrypted connection (use https://... URL)', + help='Enable SSL encrypted connection with auto-generated certificate (use https://... 
URL)', default=False ) + parser.add_argument( + '-c', '--ssl-cert', type=str, + help='Path to SSL certificate', + default=None) + parser.add_argument( + '-k', '--ssl-key', type=str, + help='Path to SSL private key', + default=None) args = parser.parse_args(argv) @@ -197,9 +240,15 @@ def main(argv=sys.argv[1:]): create_backend_app, service=args.service) main_app.debug = True + ssl_context = None + if args.ssl_key and args.ssl_cert: + ssl_context = (args.ssl_cert, args.ssl_key) + elif args.ssl: + ssl_context = 'adhoc' + run_simple(args.host, args.port, main_app, threaded=True, use_reloader=args.reload, - ssl_context='adhoc' if args.ssl else None) + ssl_context=ssl_context) if __name__ == '__main__': diff --git a/moto/ses/feedback.py b/moto/ses/feedback.py new file mode 100644 index 000000000..2d32f9ce0 --- /dev/null +++ b/moto/ses/feedback.py @@ -0,0 +1,81 @@ +""" +SES Feedback messages +Extracted from https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html +""" +COMMON_MAIL = { + "notificationType": "Bounce, Complaint, or Delivery.", + "mail": { + "timestamp": "2018-10-08T14:05:45 +0000", + "messageId": "000001378603177f-7a5433e7-8edb-42ae-af10-f0181f34d6ee-000000", + "source": "sender@example.com", + "sourceArn": "arn:aws:ses:us-west-2:888888888888:identity/example.com", + "sourceIp": "127.0.3.0", + "sendingAccountId": "123456789012", + "destination": [ + "recipient@example.com" + ], + "headersTruncated": False, + "headers": [ + { + "name": "From", + "value": "\"Sender Name\" " + }, + { + "name": "To", + "value": "\"Recipient Name\" " + } + ], + "commonHeaders": { + "from": [ + "Sender Name " + ], + "date": "Mon, 08 Oct 2018 14:05:45 +0000", + "to": [ + "Recipient Name " + ], + "messageId": " custom-message-ID", + "subject": "Message sent using Amazon SES" + } + } +} +BOUNCE = { + "bounceType": "Permanent", + "bounceSubType": "General", + "bouncedRecipients": [ + { + "status": "5.0.0", + "action": "failed", + "diagnosticCode": "smtp; 550 user unknown", + "emailAddress": "recipient1@example.com" + }, + { + "status": "4.0.0", + "action": "delayed", + "emailAddress": "recipient2@example.com" + } + ], + "reportingMTA": "example.com", + "timestamp": "2012-05-25T14:59:38.605Z", + "feedbackId": "000001378603176d-5a4b5ad9-6f30-4198-a8c3-b1eb0c270a1d-000000", + "remoteMtaIp": "127.0.2.0" +} +COMPLAINT = { + "userAgent": "AnyCompany Feedback Loop (V0.01)", + "complainedRecipients": [ + { + "emailAddress": "recipient1@example.com" + } + ], + "complaintFeedbackType": "abuse", + "arrivalDate": "2009-12-03T04:24:21.000-05:00", + "timestamp": "2012-05-25T14:59:38.623Z", + "feedbackId": "000001378603177f-18c07c78-fa81-4a58-9dd1-fedc3cb8f49a-000000" +} +DELIVERY = { + "timestamp": "2014-05-28T22:41:01.184Z", + "processingTimeMillis": 546, + "recipients": ["success@simulator.amazonses.com"], + "smtpResponse": "250 ok: Message 64111812 accepted", + "reportingMTA": "a8-70.smtp-out.amazonses.com", + "remoteMtaIp": "127.0.2.0" +} diff --git a/moto/ses/models.py b/moto/ses/models.py index 179f4d8e0..0544ac278 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -1,25 +1,61 @@ from __future__ import unicode_literals import email +from email.utils import parseaddr from moto.core import BaseBackend, BaseModel +from moto.sns.models import sns_backends from .exceptions import MessageRejectedError from .utils import get_random_message_id - +from .feedback import COMMON_MAIL, BOUNCE, COMPLAINT, DELIVERY RECIPIENT_LIMIT = 50 +class SESFeedback(BaseModel): + + BOUNCE = "Bounce" + COMPLAINT 
= "Complaint" + DELIVERY = "Delivery" + + SUCCESS_ADDR = "success" + BOUNCE_ADDR = "bounce" + COMPLAINT_ADDR = "complaint" + + FEEDBACK_SUCCESS_MSG = {"test": "success"} + FEEDBACK_BOUNCE_MSG = {"test": "bounce"} + FEEDBACK_COMPLAINT_MSG = {"test": "complaint"} + + @staticmethod + def generate_message(msg_type): + msg = dict(COMMON_MAIL) + if msg_type == SESFeedback.BOUNCE: + msg["bounce"] = BOUNCE + elif msg_type == SESFeedback.COMPLAINT: + msg["complaint"] = COMPLAINT + elif msg_type == SESFeedback.DELIVERY: + msg["delivery"] = DELIVERY + + return msg + + class Message(BaseModel): - def __init__(self, message_id): + def __init__(self, message_id, source, subject, body, destinations): self.id = message_id + self.source = source + self.subject = subject + self.body = body + self.destinations = destinations class RawMessage(BaseModel): - def __init__(self, message_id): + def __init__(self, message_id, source, destinations, raw_data): self.id = message_id + self.source = source + self.destinations = destinations + self.raw_data = raw_data class SESQuota(BaseModel): @@ -40,8 +76,10 @@ class SESBackend(BaseBackend): self.domains = [] self.sent_messages = [] self.sent_message_count = 0 + self.sns_topics = {} - def _is_verified_address(self, address): + def _is_verified_address(self, source): + _, address = parseaddr(source) if address in self.addresses: return True user, host = address.split('@', 1) @@ -68,7 +106,7 @@ class SESBackend(BaseBackend): else: self.domains.remove(identity) - def send_email(self, source, subject, body, destinations): + def send_email(self, source, subject, body, destinations, region): recipient_count = sum(map(len, destinations.values())) if recipient_count > RECIPIENT_LIMIT: raise MessageRejectedError('Too many recipients.') @@ -77,20 +115,67 @@ class SESBackend(BaseBackend): "Email address not verified %s" % source ) + self.__process_sns_feedback__(source, destinations, region) + message_id = get_random_message_id() - message = Message(message_id) + message = Message(message_id, source, subject, body, destinations) self.sent_messages.append(message) self.sent_message_count += recipient_count return message - def send_raw_email(self, source, destinations, raw_data): - if source not in self.addresses: - raise MessageRejectedError( - "Did not have authority to send from email %s" % source - ) + def __type_of_message__(self, destinations): + """Checks the destination for any special address that could indicate delivery, complaint or bounce + like in SES simualtor""" + alladdress = destinations.get("ToAddresses", []) + destinations.get("CcAddresses", []) + destinations.get("BccAddresses", []) + for addr in alladdress: + if SESFeedback.SUCCESS_ADDR in addr: + return SESFeedback.DELIVERY + elif SESFeedback.COMPLAINT_ADDR in addr: + return SESFeedback.COMPLAINT + elif SESFeedback.BOUNCE_ADDR in addr: + return SESFeedback.BOUNCE + + return None + + def __generate_feedback__(self, msg_type): + """Generates the SNS message for the feedback""" + return SESFeedback.generate_message(msg_type) + + def __process_sns_feedback__(self, source, destinations, region): + domain = str(source) + if "@" in domain: + domain = domain.split("@")[1] + if domain in self.sns_topics: + msg_type = self.__type_of_message__(destinations) + if msg_type is not None: + sns_topic = self.sns_topics[domain].get(msg_type, None) + if sns_topic is not None: + message = self.__generate_feedback__(msg_type) + if message: + sns_backends[region].publish(sns_topic, message) + + def send_raw_email(self, source, 
destinations, raw_data, region): + if source is not None: + _, source_email_address = parseaddr(source) + if source_email_address not in self.addresses: + raise MessageRejectedError( + "Did not have authority to send from email %s" % source_email_address + ) recipient_count = len(destinations) message = email.message_from_string(raw_data) + if source is None: + if message['from'] is None: + raise MessageRejectedError( + "Source not specified" + ) + + _, source_email_address = parseaddr(message['from']) + if source_email_address not in self.addresses: + raise MessageRejectedError( + "Did not have authority to send from email %s" % source_email_address + ) + for header in 'TO', 'CC', 'BCC': recipient_count += sum( d.strip() and 1 or 0 @@ -99,14 +184,27 @@ class SESBackend(BaseBackend): if recipient_count > RECIPIENT_LIMIT: raise MessageRejectedError('Too many recipients.') + self.__process_sns_feedback__(source, destinations, region) + self.sent_message_count += recipient_count message_id = get_random_message_id() - message = RawMessage(message_id) + message = RawMessage(message_id, source, destinations, raw_data) self.sent_messages.append(message) return message def get_send_quota(self): return SESQuota(self.sent_message_count) + def set_identity_notification_topic(self, identity, notification_type, sns_topic): + identity_sns_topics = self.sns_topics.get(identity, {}) + if sns_topic is None: + del identity_sns_topics[notification_type] + else: + identity_sns_topics[notification_type] = sns_topic + + self.sns_topics[identity] = identity_sns_topics + + return {} + ses_backend = SESBackend() diff --git a/moto/ses/responses.py b/moto/ses/responses.py index 6cd018aa6..d2dda55f1 100644 --- a/moto/ses/responses.py +++ b/moto/ses/responses.py @@ -70,12 +70,15 @@ class EmailResponse(BaseResponse): break destinations[dest_type].append(address[0]) - message = ses_backend.send_email(source, subject, body, destinations) + message = ses_backend.send_email(source, subject, body, destinations, self.region) template = self.response_template(SEND_EMAIL_RESPONSE) return template.render(message=message) def send_raw_email(self): - source = self.querystring.get('Source')[0] + source = self.querystring.get('Source') + if source is not None: + source, = source + raw_data = self.querystring.get('RawMessage.Data')[0] raw_data = base64.b64decode(raw_data) if six.PY3: @@ -89,7 +92,7 @@ class EmailResponse(BaseResponse): break destinations.append(address[0]) - message = ses_backend.send_raw_email(source, destinations, raw_data) + message = ses_backend.send_raw_email(source, destinations, raw_data, self.region) template = self.response_template(SEND_RAW_EMAIL_RESPONSE) return template.render(message=message) @@ -98,6 +101,18 @@ class EmailResponse(BaseResponse): template = self.response_template(GET_SEND_QUOTA_RESPONSE) return template.render(quota=quota) + def set_identity_notification_topic(self): + + identity = self.querystring.get("Identity")[0] + not_type = self.querystring.get("NotificationType")[0] + sns_topic = self.querystring.get("SnsTopic") + if sns_topic: + sns_topic = sns_topic[0] + + ses_backend.set_identity_notification_topic(identity, not_type, sns_topic) + template = self.response_template(SET_IDENTITY_NOTIFICATION_TOPIC_RESPONSE) + return template.render() + VERIFY_EMAIL_IDENTITY = """ @@ -197,3 +212,10 @@ GET_SEND_QUOTA_RESPONSE = """ + + + 47e0ef1a-9bf2-11e1-9279-0100e8cf109a + +""" diff --git a/moto/sns/models.py b/moto/sns/models.py index 80da5f92f..18b86cb93 100644 --- a/moto/sns/models.py +++ 
b/moto/sns/models.py @@ -4,14 +4,15 @@ import datetime import uuid import json -import boto.sns import requests import six import re +from boto3 import Session + from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel -from moto.core.utils import iso_8601_datetime_with_milliseconds +from moto.core.utils import iso_8601_datetime_with_milliseconds, camelcase_to_underscores from moto.sqs import sqs_backends from moto.awslambda import lambda_backends @@ -23,6 +24,7 @@ from .utils import make_arn_for_topic, make_arn_for_subscription DEFAULT_ACCOUNT_ID = 123456789012 DEFAULT_PAGE_SIZE = 100 +MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB class Topic(BaseModel): @@ -42,11 +44,12 @@ class Topic(BaseModel): self.subscriptions_confimed = 0 self.subscriptions_deleted = 0 - def publish(self, message): + def publish(self, message, subject=None, message_attributes=None): message_id = six.text_type(uuid.uuid4()) subscriptions, _ = self.sns_backend.list_subscriptions(self.arn) for subscription in subscriptions: - subscription.publish(message, message_id) + subscription.publish(message, message_id, subject=subject, + message_attributes=message_attributes) return message_id def get_cfn_attribute(self, attribute_name): @@ -81,29 +84,72 @@ class Subscription(BaseModel): self.protocol = protocol self.arn = make_arn_for_subscription(self.topic.arn) self.attributes = {} + self._filter_policy = None # filter policy as a dict, not json. self.confirmed = False - def publish(self, message, message_id): + def publish(self, message, message_id, subject=None, + message_attributes=None): + if not self._matches_filter_policy(message_attributes): + return + if self.protocol == 'sqs': queue_name = self.endpoint.split(":")[-1] region = self.endpoint.split(":")[3] - enveloped_message = json.dumps(self.get_post_data(message, message_id), sort_keys=True, indent=2, separators=(',', ': ')) + if self.attributes.get('RawMessageDelivery') != 'true': + enveloped_message = json.dumps(self.get_post_data(message, message_id, subject, message_attributes=message_attributes), sort_keys=True, indent=2, separators=(',', ': ')) + else: + enveloped_message = message sqs_backends[region].send_message(queue_name, enveloped_message) elif self.protocol in ['http', 'https']: - post_data = self.get_post_data(message, message_id) - requests.post(self.endpoint, json=post_data) + post_data = self.get_post_data(message, message_id, subject) + requests.post(self.endpoint, json=post_data, headers={'Content-Type': 'text/plain; charset=UTF-8'}) elif self.protocol == 'lambda': # TODO: support bad function name - function_name = self.endpoint.split(":")[-1] - region = self.arn.split(':')[3] - lambda_backends[region].send_message(function_name, message) + # http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + arr = self.endpoint.split(":") + region = arr[3] + qualifier = None + if len(arr) == 7: + assert arr[5] == 'function' + function_name = arr[-1] + elif len(arr) == 8: + assert arr[5] == 'function' + qualifier = arr[-1] + function_name = arr[-2] + else: + assert False - def get_post_data(self, message, message_id): - return { + lambda_backends[region].send_message(function_name, message, subject=subject, qualifier=qualifier) + + def _matches_filter_policy(self, message_attributes): + # TODO: support Anything-but matching, prefix matching and + # numeric value matching. 
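+ # Example (string matching only): a filter policy of {"store": ["example_corp"]} + # delivers a message only when its 'store' attribute carries the value + # 'example_corp'; messages without that attribute are filtered out.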
+ if not self._filter_policy: + return True + + if message_attributes is None: + message_attributes = {} + + def _field_match(field, rules, message_attributes): + if field not in message_attributes: + return False + for rule in rules: + if isinstance(rule, six.string_types): + # only string value matching is supported + if message_attributes[field]['Value'] == rule: + return True + return False + + return all(_field_match(field, rules, message_attributes) + for field, rules in six.iteritems(self._filter_policy)) + + def get_post_data( + self, message, message_id, subject, message_attributes=None): + post_data = { "Type": "Notification", "MessageId": message_id, "TopicArn": self.topic.arn, - "Subject": "my subject", + "Subject": subject or "my subject", "Message": message, "Timestamp": iso_8601_datetime_with_milliseconds(datetime.datetime.utcnow()), "SignatureVersion": "1", @@ -111,6 +157,9 @@ class Subscription(BaseModel): "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem", "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55" } + if message_attributes: + post_data["MessageAttributes"] = message_attributes + return post_data class PlatformApplication(BaseModel): @@ -194,11 +243,14 @@ class SNSBackend(BaseBackend): def update_sms_attributes(self, attrs): self.sms_attributes.update(attrs) - def create_topic(self, name): - fails_constraints = not re.match(r'^[a-zA-Z0-9](?:[A-Za-z0-9_-]{0,253}[a-zA-Z0-9])?$', name) + def create_topic(self, name, attributes=None): + fails_constraints = not re.match(r'^[a-zA-Z0-9_-]{1,256}$', name) if fails_constraints: raise InvalidParameterValue("Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.") candidate_topic = Topic(name, self) + if attributes: + for attribute in attributes: + setattr(candidate_topic, camelcase_to_underscores(attribute), attributes[attribute]) if candidate_topic.arn in self.topics: return self.topics[candidate_topic.arn] else: @@ -206,7 +258,7 @@ class SNSBackend(BaseBackend): return candidate_topic def _get_values_nexttoken(self, values_map, next_token=None): - if next_token is None: + if next_token is None or not next_token: next_token = 0 next_token = int(next_token) values = list(values_map.values())[ @@ -247,11 +299,21 @@ class SNSBackend(BaseBackend): setattr(topic, attribute_name, attribute_value) def subscribe(self, topic_arn, endpoint, protocol): + # AWS doesn't create duplicates + old_subscription = self._find_subscription(topic_arn, endpoint, protocol) + if old_subscription: + return old_subscription topic = self.get_topic(topic_arn) subscription = Subscription(topic, endpoint, protocol) self.subscriptions[subscription.arn] = subscription return subscription + def _find_subscription(self, topic_arn, endpoint, protocol): + for subscription in self.subscriptions.values(): + if subscription.topic.arn == topic_arn and subscription.endpoint == endpoint and subscription.protocol == protocol: + return subscription + return None + def unsubscribe(self, subscription_arn): self.subscriptions.pop(subscription_arn) @@ -264,13 +326,18 @@ class SNSBackend(BaseBackend): else: return self._get_values_nexttoken(self.subscriptions, next_token) - def publish(self, arn, message, subject=None): - if subject is not None and len(subject) >= 100: + def 
publish(self, arn, message, subject=None, message_attributes=None): + if subject is not None and len(subject) > 100: + # Note that the AWS docs around length are wrong: https://github.com/spulec/moto/issues/1503 raise ValueError('Subject must be less than 100 characters') + if len(message) > MAXIMUM_MESSAGE_LENGTH: + raise InvalidParameterValue("An error occurred (InvalidParameter) when calling the Publish operation: Invalid parameter: Message too long") + try: topic = self.get_topic(arn) - message_id = topic.publish(message) + message_id = topic.publish(message, subject=subject, + message_attributes=message_attributes) except SNSNotFoundError: endpoint = self.get_endpoint(arn) message_id = endpoint.publish(message) @@ -342,7 +409,7 @@ class SNSBackend(BaseBackend): return subscription.attributes def set_subscription_attributes(self, arn, name, value): - if name not in ['RawMessageDelivery', 'DeliveryPolicy']: + if name not in ['RawMessageDelivery', 'DeliveryPolicy', 'FilterPolicy']: raise SNSInvalidParameter('AttributeName') # TODO: should do validation @@ -353,10 +420,13 @@ class SNSBackend(BaseBackend): subscription.attributes[name] = value + if name == 'FilterPolicy': + subscription._filter_policy = json.loads(value) + sns_backends = {} -for region in boto.sns.regions(): - sns_backends[region.name] = SNSBackend(region.name) +for region in Session().get_available_regions('sns'): + sns_backends[region] = SNSBackend(region) DEFAULT_TOPIC_POLICY = { diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 3b4aade80..440115429 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -6,7 +6,7 @@ from collections import defaultdict from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from .models import sns_backends -from .exceptions import SNSNotFoundError +from .exceptions import SNSNotFoundError, InvalidParameterValue from .utils import is_e164 @@ -30,9 +30,53 @@ class SNSResponse(BaseResponse): in attributes ) + def _parse_message_attributes(self, prefix='', value_namespace='Value.'): + message_attributes = self._get_object_map( + 'MessageAttributes.entry', + name='Name', + value='Value' + ) + # SNS converts some key names before forwarding messages + # DataType -> Type, StringValue -> Value, BinaryValue -> Value + transformed_message_attributes = {} + for name, value in message_attributes.items(): + # validation + data_type = value['DataType'] + if not data_type: + raise InvalidParameterValue( + "The message attribute '{0}' must contain non-empty " + "message attribute value.".format(name)) + + data_type_parts = data_type.split('.') + if (len(data_type_parts) > 2 or + data_type_parts[0] not in ['String', 'Binary', 'Number']): + raise InvalidParameterValue( + "The message attribute '{0}' has an invalid message " + "attribute type, the set of supported type prefixes is " + "Binary, Number, and String.".format(name)) + + transform_value = None + if 'StringValue' in value: + transform_value = value['StringValue'] + elif 'BinaryValue' in value: + transform_value = value['BinaryValue'] + if not transform_value: + raise InvalidParameterValue( + "The message attribute '{0}' must contain non-empty " + "message attribute value for message attribute " + "type '{1}'.".format(name, data_type)) + + # transformation + transformed_message_attributes[name] = { + 'Type': data_type, 'Value': transform_value + } + + return transformed_message_attributes + def create_topic(self): name = self._get_param('Name') - topic = 
self.backend.create_topic(name) + attributes = self._get_attributes() + topic = self.backend.create_topic(name, attributes) if self.request_json: return json.dumps({ @@ -138,6 +182,7 @@ class SNSResponse(BaseResponse): topic_arn = self._get_param('TopicArn') endpoint = self._get_param('Endpoint') protocol = self._get_param('Protocol') + attributes = self._get_attributes() if protocol == 'sms' and not is_e164(endpoint): return self._error( @@ -147,6 +192,10 @@ class SNSResponse(BaseResponse): subscription = self.backend.subscribe(topic_arn, endpoint, protocol) + if attributes is not None: + for attr_name, attr_value in attributes.items(): + self.backend.set_subscription_attributes(subscription.arn, attr_name, attr_value) + if self.request_json: return json.dumps({ "SubscribeResponse": { @@ -241,6 +290,8 @@ class SNSResponse(BaseResponse): phone_number = self._get_param('PhoneNumber') subject = self._get_param('Subject') + message_attributes = self._parse_message_attributes() + if phone_number is not None: # Check phone is correct syntax (e164) if not is_e164(phone_number): @@ -265,7 +316,9 @@ class SNSResponse(BaseResponse): message = self._get_param('Message') try: - message_id = self.backend.publish(arn, message, subject=subject) + message_id = self.backend.publish( + arn, message, subject=subject, + message_attributes=message_attributes) except ValueError as err: error_response = self._error('InvalidParameter', str(err)) return error_response, dict(status=400) diff --git a/moto/sqs/exceptions.py b/moto/sqs/exceptions.py index baf721b53..5f1cc46b2 100644 --- a/moto/sqs/exceptions.py +++ b/moto/sqs/exceptions.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals +from moto.core.exceptions import RESTError class MessageNotInflight(Exception): @@ -21,3 +22,11 @@ class MessageAttributesInvalid(Exception): class QueueDoesNotExist(Exception): status_code = 404 description = "The specified queue does not exist for this wsdl version." 
+ + +class QueueAlreadyExists(RESTError): + code = 400 + + def __init__(self, message): + super(QueueAlreadyExists, self).__init__( + "QueueAlreadyExists", message) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 85b69ab0e..f2e3ed400 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -18,6 +18,7 @@ from .exceptions import ( MessageAttributesInvalid, MessageNotInflight, QueueDoesNotExist, + QueueAlreadyExists, ReceiptHandleIsInvalid, ) @@ -38,6 +39,8 @@ class Message(BaseModel): self.sent_timestamp = None self.approximate_first_receive_timestamp = None self.approximate_receive_count = 0 + self.deduplication_id = None + self.group_id = None self.visible_at = 0 self.delayed_until = 0 @@ -152,68 +155,108 @@ class Message(BaseModel): class Queue(BaseModel): - camelcase_attributes = ['ApproximateNumberOfMessages', - 'ApproximateNumberOfMessagesDelayed', - 'ApproximateNumberOfMessagesNotVisible', - 'ContentBasedDeduplication', - 'CreatedTimestamp', - 'DelaySeconds', - 'FifoQueue', - 'KmsDataKeyReusePeriodSeconds', - 'KmsMasterKeyId', - 'LastModifiedTimestamp', - 'MaximumMessageSize', - 'MessageRetentionPeriod', - 'QueueArn', - 'ReceiveMessageWaitTimeSeconds', - 'VisibilityTimeout', - 'WaitTimeSeconds'] - ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', 'GetQueueAttributes', - 'GetQueueUrl', 'ReceiveMessage', 'SendMessage') + base_attributes = ['ApproximateNumberOfMessages', + 'ApproximateNumberOfMessagesDelayed', + 'ApproximateNumberOfMessagesNotVisible', + 'CreatedTimestamp', + 'DelaySeconds', + 'LastModifiedTimestamp', + 'MaximumMessageSize', + 'MessageRetentionPeriod', + 'QueueArn', + 'ReceiveMessageWaitTimeSeconds', + 'VisibilityTimeout'] + fifo_attributes = ['FifoQueue', + 'ContentBasedDeduplication'] + kms_attributes = ['KmsDataKeyReusePeriodSeconds', + 'KmsMasterKeyId'] + ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', + 'GetQueueAttributes', 'GetQueueUrl', + 'ReceiveMessage', 'SendMessage') def __init__(self, name, region, **kwargs): self.name = name - self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30)) self.region = region self.tags = {} - - self._messages = [] - - now = unix_time() - - # kwargs can also have: - # [Policy, RedrivePolicy] - self.fifo_queue = kwargs.get('FifoQueue', 'false') == 'true' - self.content_based_deduplication = kwargs.get('ContentBasedDeduplication', 'false') == 'true' - self.kms_master_key_id = kwargs.get('KmsMasterKeyId', 'alias/aws/sqs') - self.kms_data_key_reuse_period_seconds = int(kwargs.get('KmsDataKeyReusePeriodSeconds', 300)) - self.created_timestamp = now - self.delay_seconds = int(kwargs.get('DelaySeconds', 0)) - self.last_modified_timestamp = now - self.maximum_message_size = int(kwargs.get('MaximumMessageSize', 64 << 10)) - self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4)) # four days - self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name) - self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0)) self.permissions = {} - # wait_time_seconds will be set to immediate return messages - self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0)) + self._messages = [] + self._pending_messages = set() - self.redrive_policy = {} + now = unix_time() + self.created_timestamp = now + self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, + self.name) self.dead_letter_queue = None - if 'RedrivePolicy' in kwargs: - self._setup_dlq(kwargs['RedrivePolicy']) + # 
default settings for a non-FIFO queue + defaults = { + 'ContentBasedDeduplication': 'false', + 'DelaySeconds': 0, + 'FifoQueue': 'false', + 'KmsDataKeyReusePeriodSeconds': 300, # five minutes + 'KmsMasterKeyId': None, + 'MaximumMessageSize': int(64 << 10), + 'MessageRetentionPeriod': 86400 * 4, # four days + 'Policy': None, + 'ReceiveMessageWaitTimeSeconds': 0, + 'RedrivePolicy': None, + 'VisibilityTimeout': 30, + } + + defaults.update(kwargs) + self._set_attributes(defaults, now) # Check some conditions if self.fifo_queue and not self.name.endswith('.fifo'): raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues') - def _setup_dlq(self, policy_json): - try: - self.redrive_policy = json.loads(policy_json) - except ValueError: - raise RESTError('InvalidParameterValue', 'Redrive policy does not contain valid json') + @property + def pending_messages(self): + return self._pending_messages + + @property + def pending_message_groups(self): + return set(message.group_id + for message in self._pending_messages + if message.group_id is not None) + + def _set_attributes(self, attributes, now=None): + if not now: + now = unix_time() + + integer_fields = ('DelaySeconds', 'KmsDataKeyReusePeriodSeconds', + 'MaximumMessageSize', 'MessageRetentionPeriod', + 'ReceiveMessageWaitTimeSeconds', 'VisibilityTimeout') + bool_fields = ('ContentBasedDeduplication', 'FifoQueue') + + for key, value in six.iteritems(attributes): + if key in integer_fields: + value = int(value) + if key in bool_fields: + value = value == "true" + + if key == 'RedrivePolicy' and value is not None: + continue + + setattr(self, camelcase_to_underscores(key), value) + + if attributes.get('RedrivePolicy', None): + self._setup_dlq(attributes['RedrivePolicy']) + + self.last_modified_timestamp = now + + def _setup_dlq(self, policy): + + if isinstance(policy, six.text_type): + try: + self.redrive_policy = json.loads(policy) + except ValueError: + raise RESTError('InvalidParameterValue', 'Redrive policy is not a dict or valid json') + elif isinstance(policy, dict): + self.redrive_policy = policy + else: + raise RESTError('InvalidParameterValue', 'Redrive policy is not a dict or valid json') if 'deadLetterTargetArn' not in self.redrive_policy: raise RESTError('InvalidParameterValue', 'Redrive policy does not contain deadLetterTargetArn') @@ -251,8 +294,8 @@ class Queue(BaseModel): if 'VisibilityTimeout' in properties: queue.visibility_timeout = int(properties['VisibilityTimeout']) - if 'WaitTimeSeconds' in properties: - queue.wait_time_seconds = int(properties['WaitTimeSeconds']) + if 'ReceiveMessageWaitTimeSeconds' in properties: + queue.receive_message_wait_time_seconds = int(properties['ReceiveMessageWaitTimeSeconds']) return queue @classmethod @@ -281,11 +324,31 @@ class Queue(BaseModel): @property def attributes(self): result = {} + + for attribute in self.base_attributes: attr = getattr(self, camelcase_to_underscores(attribute)) - if isinstance(attr, bool): - attr = str(attr).lower() result[attribute] = attr + + if self.fifo_queue: + for attribute in self.fifo_attributes: + attr = getattr(self, camelcase_to_underscores(attribute)) + result[attribute] = attr + + if self.kms_master_key_id: + for attribute in self.kms_attributes: + attr = getattr(self, camelcase_to_underscores(attribute)) + result[attribute] = attr + + if self.policy: + result['Policy'] = self.policy + + if self.redrive_policy: + result['RedrivePolicy'] = json.dumps(self.redrive_policy) + + for key in result: + if 
isinstance(result[key], bool): + result[key] = str(result[key]).lower() + return result def url(self, request_url): @@ -316,12 +379,30 @@ class SQSBackend(BaseBackend): def reset(self): region_name = self.region_name + self._reset_model_refs() self.__dict__ = {} self.__init__(region_name) def create_queue(self, name, **kwargs): queue = self.queues.get(name) - if queue is None: + if queue: + try: + kwargs.pop('region') + except KeyError: + pass + + new_queue = Queue(name, region=self.region_name, **kwargs) + + queue_attributes = queue.attributes + new_queue_attributes = new_queue.attributes + + for key in ['CreatedTimestamp', 'LastModifiedTimestamp']: + queue_attributes.pop(key) + new_queue_attributes.pop(key) + + if queue_attributes != new_queue_attributes: + raise QueueAlreadyExists("The specified queue already exists.") + else: try: kwargs.pop('region') except KeyError: @@ -352,12 +433,12 @@ class SQSBackend(BaseBackend): return self.queues.pop(queue_name) return False - def set_queue_attribute(self, queue_name, key, value): + def set_queue_attributes(self, queue_name, attributes): queue = self.get_queue(queue_name) - setattr(queue, key, value) + queue._set_attributes(attributes) return queue - def send_message(self, queue_name, message_body, message_attributes=None, delay_seconds=None): + def send_message(self, queue_name, message_body, message_attributes=None, delay_seconds=None, deduplication_id=None, group_id=None): queue = self.get_queue(queue_name) @@ -369,6 +450,12 @@ class SQSBackend(BaseBackend): message_id = get_random_message_id() message = Message(message_id, message_body) + # Attributes, but not *message* attributes + if deduplication_id is not None: + message.deduplication_id = deduplication_id + if group_id is not None: + message.group_id = group_id + if message_attributes: message.message_attributes = message_attributes @@ -397,6 +484,7 @@ class SQSBackend(BaseBackend): """ queue = self.get_queue(queue_name) result = [] + previous_result_count = len(result) polling_end = unix_time() + wait_seconds_timeout @@ -406,19 +494,25 @@ class SQSBackend(BaseBackend): if result or (wait_seconds_timeout and unix_time() > polling_end): break - if len(queue.messages) == 0: - # we want to break here, otherwise it will be an infinite loop - if wait_seconds_timeout == 0: - break - - import time - time.sleep(0.001) - continue - messages_to_dlq = [] + for message in queue.messages: if not message.visible: continue + + if message in queue.pending_messages: + # The message is pending but is visible again, so the + # consumer must have timed out. + queue.pending_messages.remove(message) + + if message.group_id and queue.fifo_queue: + if message.group_id in queue.pending_message_groups: + # There is already one active message with the same + # group, so we cannot deliver this one. + continue + + queue.pending_messages.add(message) + if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']: messages_to_dlq.append(message) continue @@ -434,6 +528,18 @@ class SQSBackend(BaseBackend): queue._messages.remove(message) queue.dead_letter_queue.add_message(message) + if previous_result_count == len(result): + if wait_seconds_timeout == 0: + # There is timeout and we have added no additional results, + # so break to avoid an infinite loop. 
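+ # (a wait_seconds_timeout of 0 means the caller did not request long + # polling, so there is nothing to wait for and we return immediately)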
+ break + + import time + time.sleep(0.01) + continue + + previous_result_count = len(result) + return result def delete_message(self, queue_name, receipt_handle): @@ -443,6 +549,7 @@ class SQSBackend(BaseBackend): # Only delete message if it is not visible and the reciept_handle # matches. if message.receipt_handle == receipt_handle: + queue.pending_messages.remove(message) continue new_messages.append(message) queue._messages = new_messages @@ -454,6 +561,10 @@ class SQSBackend(BaseBackend): if message.visible: raise MessageNotInflight message.change_visibility(visibility_timeout) + if message.visible: + # If the message is visible again, remove it from pending + # messages. + queue.pending_messages.remove(message) return raise ReceiptHandleIsInvalid diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index bb21c1e2a..5ddaf8849 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -1,8 +1,10 @@ from __future__ import unicode_literals + +import re from six.moves.urllib.parse import urlparse from moto.core.responses import BaseResponse -from moto.core.utils import camelcase_to_underscores, amz_crc32, amzn_request_id +from moto.core.utils import amz_crc32, amzn_request_id from .utils import parse_message_attributes from .models import sqs_backends from .exceptions import ( @@ -15,12 +17,11 @@ from .exceptions import ( MAXIMUM_VISIBILTY_TIMEOUT = 43200 MAXIMUM_MESSAGE_LENGTH = 262144 # 256 KiB DEFAULT_RECEIVED_MESSAGES = 1 -SQS_REGION_REGEX = r'://(.+?)\.queue\.amazonaws\.com' class SQSResponse(BaseResponse): - region_regex = SQS_REGION_REGEX + region_regex = re.compile(r'://(.+?)\.queue\.amazonaws\.com') @property def sqs_backend(self): @@ -29,7 +30,7 @@ class SQSResponse(BaseResponse): @property def attribute(self): if not hasattr(self, '_attribute'): - self._attribute = self._get_map_prefix('Attribute', key_end='Name', value_end='Value') + self._attribute = self._get_map_prefix('Attribute', key_end='.Name', value_end='.Value') return self._attribute def _get_queue_name(self): @@ -86,7 +87,8 @@ class SQSResponse(BaseResponse): try: queue = self.sqs_backend.get_queue(queue_name) except QueueDoesNotExist as e: - return self._error('QueueDoesNotExist', e.description) + return self._error('AWS.SimpleQueueService.NonExistentQueue', + e.description) if queue: template = self.response_template(GET_QUEUE_URL_RESPONSE) @@ -170,7 +172,8 @@ class SQSResponse(BaseResponse): try: queue = self.sqs_backend.get_queue(queue_name) except QueueDoesNotExist as e: - return self._error('QueueDoesNotExist', e.description) + return self._error('AWS.SimpleQueueService.NonExistentQueue', + e.description) template = self.response_template(GET_QUEUE_ATTRIBUTES_RESPONSE) return template.render(queue=queue) @@ -178,9 +181,8 @@ class SQSResponse(BaseResponse): def set_queue_attributes(self): # TODO validate self.get_param('QueueUrl') queue_name = self._get_queue_name() - for key, value in self.attribute.items(): - key = camelcase_to_underscores(key) - self.sqs_backend.set_queue_attribute(queue_name, key, value) + self.sqs_backend.set_queue_attributes(queue_name, self.attribute) + return SET_QUEUE_ATTRIBUTE_RESPONSE def delete_queue(self): @@ -196,6 +198,8 @@ class SQSResponse(BaseResponse): def send_message(self): message = self._get_param('MessageBody') delay_seconds = int(self._get_param('DelaySeconds', 0)) + message_group_id = self._get_param("MessageGroupId") + message_dedupe_id = self._get_param("MessageDeduplicationId") if len(message) > MAXIMUM_MESSAGE_LENGTH: return ERROR_TOO_LONG_RESPONSE, 
dict(status=400) @@ -211,7 +215,9 @@ class SQSResponse(BaseResponse): queue_name, message, message_attributes=message_attributes, - delay_seconds=delay_seconds + delay_seconds=delay_seconds, + deduplication_id=message_dedupe_id, + group_id=message_group_id ) template = self.response_template(SEND_MESSAGE_RESPONSE) return template.render(message=message, message_attributes=message_attributes) @@ -319,10 +325,26 @@ class SQSResponse(BaseResponse): except TypeError: message_count = DEFAULT_RECEIVED_MESSAGES + if message_count < 1 or message_count > 10: + return self._error( + "InvalidParameterValue", + "An error occurred (InvalidParameterValue) when calling " + "the ReceiveMessage operation: Value %s for parameter " + "MaxNumberOfMessages is invalid. Reason: must be between " + "1 and 10, if provided." % message_count) + try: wait_time = int(self.querystring.get("WaitTimeSeconds")[0]) except TypeError: - wait_time = queue.wait_time_seconds + wait_time = int(queue.receive_message_wait_time_seconds) + + if wait_time < 0 or wait_time > 20: + return self._error( + "InvalidParameterValue", + "An error occurred (InvalidParameterValue) when calling " + "the ReceiveMessage operation: Value %s for parameter " + "WaitTimeSeconds is invalid. Reason: must be <= 0 and " + ">= 20 if provided." % wait_time) try: visibility_timeout = self._get_validated_visibility_timeout() @@ -398,7 +420,7 @@ CREATE_QUEUE_RESPONSE = """ {{ queue.visibility_timeout }} - {{ requestid }} + """ @@ -407,7 +429,7 @@ GET_QUEUE_URL_RESPONSE = """ {{ queue.url(request_url) }} - {{ requestid }} + """ @@ -418,13 +440,13 @@ LIST_QUEUES_RESPONSE = """ {% endfor %} - {{ requestid }} + """ DELETE_QUEUE_RESPONSE = """ - {{ requestid }} + """ @@ -438,13 +460,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """ {% endfor %} - {{ requestid }} + """ SET_QUEUE_ATTRIBUTE_RESPONSE = """ - {{ requestid }} + """ @@ -461,7 +483,7 @@ SEND_MESSAGE_RESPONSE = """ - {{ requestid }} + """ @@ -489,6 +511,18 @@ RECEIVE_MESSAGE_RESPONSE = """ ApproximateFirstReceiveTimestamp {{ message.approximate_first_receive_timestamp }} + {% if message.deduplication_id is not none %} + + MessageDeduplicationId + {{ message.deduplication_id }} + + {% endif %} + {% if message.group_id is not none %} + + MessageGroupId + {{ message.group_id }} + + {% endif %} {% if message.message_attributes.items()|count > 0 %} {{- message.attribute_md5 -}} {% endif %} @@ -509,7 +543,7 @@ RECEIVE_MESSAGE_RESPONSE = """ {% endfor %} - {{ requestid }} + """ @@ -527,13 +561,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - {{ requestid }} + """ DELETE_MESSAGE_RESPONSE = """ - {{ requestid }} + """ @@ -546,13 +580,13 @@ DELETE_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - {{ requestid }} + """ CHANGE_MESSAGE_VISIBILITY_RESPONSE = """ - {{ requestid }} + """ @@ -579,7 +613,7 @@ CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """ - {{ requestid }} + """ diff --git a/moto/ssm/models.py b/moto/ssm/models.py index c8c428b64..2f316a3ac 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -3,15 +3,26 @@ from __future__ import unicode_literals from collections import defaultdict from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import RESTError from moto.ec2 import ec2_backends +from moto.cloudformation import cloudformation_backends + +import datetime +import time +import uuid +import itertools class Parameter(BaseModel): - def __init__(self, name, value, type, description, keyid): + def __init__(self, name, value, type, description, allowed_pattern, keyid, + last_modified_date, 
version): self.name = name self.type = type self.description = description + self.allowed_pattern = allowed_pattern self.keyid = keyid + self.last_modified_date = last_modified_date + self.version = version if self.type == 'SecureString': self.value = self.encrypt(value) @@ -33,18 +44,184 @@ class Parameter(BaseModel): r = { 'Name': self.name, 'Type': self.type, - 'Value': self.decrypt(self.value) if decrypt else self.value + 'Value': self.decrypt(self.value) if decrypt else self.value, + 'Version': self.version, } + + return r + + def describe_response_object(self, decrypt=False): + r = self.response_object(decrypt) + r['LastModifiedDate'] = int(self.last_modified_date) + r['LastModifiedUser'] = 'N/A' + + if self.description: + r['Description'] = self.description + if self.keyid: r['KeyId'] = self.keyid + + if self.allowed_pattern: + r['AllowedPattern'] = self.allowed_pattern + return r +MAX_TIMEOUT_SECONDS = 3600 + + +class Command(BaseModel): + def __init__(self, comment='', document_name='', timeout_seconds=MAX_TIMEOUT_SECONDS, + instance_ids=None, max_concurrency='', max_errors='', + notification_config=None, output_s3_bucket_name='', + output_s3_key_prefix='', output_s3_region='', parameters=None, + service_role_arn='', targets=None, backend_region='us-east-1'): + + if instance_ids is None: + instance_ids = [] + + if notification_config is None: + notification_config = {} + + if parameters is None: + parameters = {} + + if targets is None: + targets = [] + + self.error_count = 0 + self.completed_count = len(instance_ids) + self.target_count = len(instance_ids) + self.command_id = str(uuid.uuid4()) + self.status = 'Success' + self.status_details = 'Details placeholder' + + self.requested_date_time = datetime.datetime.now() + self.requested_date_time_iso = self.requested_date_time.isoformat() + expires_after = self.requested_date_time + datetime.timedelta(0, timeout_seconds) + self.expires_after = expires_after.isoformat() + + self.comment = comment + self.document_name = document_name + self.instance_ids = instance_ids + self.max_concurrency = max_concurrency + self.max_errors = max_errors + self.notification_config = notification_config + self.output_s3_bucket_name = output_s3_bucket_name + self.output_s3_key_prefix = output_s3_key_prefix + self.output_s3_region = output_s3_region + self.parameters = parameters + self.service_role_arn = service_role_arn + self.targets = targets + self.backend_region = backend_region + + # Get instance ids from a cloud formation stack target. + stack_instance_ids = [self.get_instance_ids_by_stack_ids(target['Values']) for + target in self.targets if + target['Key'] == 'tag:aws:cloudformation:stack-name'] + + self.instance_ids += list(itertools.chain.from_iterable(stack_instance_ids)) + + # Create invocations with a single run command plugin. 
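+ # Each invocation mirrors the response shape of the real + # GetCommandInvocation API (see invocation_response below).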
+ self.invocations = [] + for instance_id in self.instance_ids: + self.invocations.append( + self.invocation_response(instance_id, "aws:runShellScript")) + + def get_instance_ids_by_stack_ids(self, stack_ids): + instance_ids = [] + cloudformation_backend = cloudformation_backends[self.backend_region] + for stack_id in stack_ids: + stack_resources = cloudformation_backend.list_stack_resources(stack_id) + instance_resources = [ + instance.id for instance in stack_resources + if instance.type == "AWS::EC2::Instance"] + instance_ids.extend(instance_resources) + + return instance_ids + + def response_object(self): + r = { + 'CommandId': self.command_id, + 'Comment': self.comment, + 'CompletedCount': self.completed_count, + 'DocumentName': self.document_name, + 'ErrorCount': self.error_count, + 'ExpiresAfter': self.expires_after, + 'InstanceIds': self.instance_ids, + 'MaxConcurrency': self.max_concurrency, + 'MaxErrors': self.max_errors, + 'NotificationConfig': self.notification_config, + 'OutputS3Region': self.output_s3_region, + 'OutputS3BucketName': self.output_s3_bucket_name, + 'OutputS3KeyPrefix': self.output_s3_key_prefix, + 'Parameters': self.parameters, + 'RequestedDateTime': self.requested_date_time_iso, + 'ServiceRole': self.service_role_arn, + 'Status': self.status, + 'StatusDetails': self.status_details, + 'TargetCount': self.target_count, + 'Targets': self.targets, + } + + return r + + def invocation_response(self, instance_id, plugin_name): + # Calculate elapsed time from requested time and now. Use a hardcoded + # elapsed time since there is no easy way to convert a timedelta to + # an ISO 8601 duration string. + elapsed_time_iso = "PT5M" + elapsed_time_delta = datetime.timedelta(minutes=5) + end_time = self.requested_date_time + elapsed_time_delta + + r = { + 'CommandId': self.command_id, + 'InstanceId': instance_id, + 'Comment': self.comment, + 'DocumentName': self.document_name, + 'PluginName': plugin_name, + 'ResponseCode': 0, + 'ExecutionStartDateTime': self.requested_date_time_iso, + 'ExecutionElapsedTime': elapsed_time_iso, + 'ExecutionEndDateTime': end_time.isoformat(), + 'Status': 'Success', + 'StatusDetails': 'Success', + 'StandardOutputContent': '', + 'StandardOutputUrl': '', + 'StandardErrorContent': '', + } + + return r + + def get_invocation(self, instance_id, plugin_name): + invocation = next( + (invocation for invocation in self.invocations + if invocation['InstanceId'] == instance_id), None) + + if invocation is None: + raise RESTError( + 'InvocationDoesNotExist', + 'An error occurred (InvocationDoesNotExist) when calling the GetCommandInvocation operation') + + if plugin_name is not None and invocation['PluginName'] != plugin_name: + raise RESTError( + 'InvocationDoesNotExist', + 'An error occurred (InvocationDoesNotExist) when calling the GetCommandInvocation operation') + + return invocation + + class SimpleSystemManagerBackend(BaseBackend): def __init__(self): self._parameters = {} self._resource_tags = defaultdict(lambda: defaultdict(dict)) + self._commands = [] + + # figure out what region we're in + for region, backend in ssm_backends.items(): + if backend == self: + self._region = region def delete_parameter(self, name): try: @@ -75,31 +252,66 @@ class SimpleSystemManagerBackend(BaseBackend): result.append(self._parameters[name]) return result - def get_parameters_by_path(self, path, with_decryption, recursive): + def get_parameters_by_path(self, path, with_decryption, recursive, filters=None): """Implement the get-parameters-by-path-API in the backend.""" 
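+ # The path is normalised with a trailing '/' so '/foo' and '/foo/' behave + # identically; when 'recursive' is false, parameters nested more than one + # level below the path are skipped.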
result = [] # path could be with or without a trailing /. we handle this # difference here. path = path.rstrip('/') + '/' for param in self._parameters: - if not param.startswith(path): + if path != '/' and not param.startswith(path): continue if '/' in param[len(path) + 1:] and not recursive: continue + if not self._match_filters(self._parameters[param], filters): + continue result.append(self._parameters[param]) return result + @staticmethod + def _match_filters(parameter, filters=None): + """Return True if the given parameter matches all the filters""" + for filter_obj in (filters or []): + key = filter_obj['Key'] + option = filter_obj.get('Option', 'Equals') + values = filter_obj.get('Values', []) + + what = None + if key == 'Type': + what = parameter.type + elif key == 'KeyId': + what = parameter.keyid + + if option == 'Equals'\ + and not any(what == value for value in values): + return False + elif option == 'BeginsWith'\ + and not any(what.startswith(value) for value in values): + return False + # True if no false match (or no filters at all) + return True + def get_parameter(self, name, with_decryption): if name in self._parameters: return self._parameters[name] return None - def put_parameter(self, name, description, value, type, keyid, overwrite): - if not overwrite and name in self._parameters: - return - self._parameters[name] = Parameter( - name, value, type, description, keyid) + def put_parameter(self, name, description, value, type, allowed_pattern, + keyid, overwrite): + previous_parameter = self._parameters.get(name) + version = 1 + + if previous_parameter: + version = previous_parameter.version + 1 + + if not overwrite: + return + + last_modified_date = time.time() + self._parameters[name] = Parameter(name, value, type, description, + allowed_pattern, keyid, last_modified_date, version) + return version def add_tags_to_resource(self, resource_type, resource_id, tags): for key, value in tags.items(): @@ -114,6 +326,75 @@ class SimpleSystemManagerBackend(BaseBackend): def list_tags_for_resource(self, resource_type, resource_id): return self._resource_tags[resource_type][resource_id] + def send_command(self, **kwargs): + command = Command( + comment=kwargs.get('Comment', ''), + document_name=kwargs.get('DocumentName'), + timeout_seconds=kwargs.get('TimeoutSeconds', 3600), + instance_ids=kwargs.get('InstanceIds', []), + max_concurrency=kwargs.get('MaxConcurrency', '50'), + max_errors=kwargs.get('MaxErrors', '0'), + notification_config=kwargs.get('NotificationConfig', { + 'NotificationArn': 'string', + 'NotificationEvents': ['Success'], + 'NotificationType': 'Command' + }), + output_s3_bucket_name=kwargs.get('OutputS3BucketName', ''), + output_s3_key_prefix=kwargs.get('OutputS3KeyPrefix', ''), + output_s3_region=kwargs.get('OutputS3Region', ''), + parameters=kwargs.get('Parameters', {}), + service_role_arn=kwargs.get('ServiceRoleArn', ''), + targets=kwargs.get('Targets', []), + backend_region=self._region) + + self._commands.append(command) + return { + 'Command': command.response_object() + } + + def list_commands(self, **kwargs): + """ + https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_ListCommands.html + """ + commands = self._commands + + command_id = kwargs.get('CommandId', None) + if command_id: + commands = [self.get_command_by_id(command_id)] + instance_id = kwargs.get('InstanceId', None) + if instance_id: + commands = self.get_commands_by_instance_id(instance_id) + + return { + 'Commands': [command.response_object() for command in commands] + } + + 
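# Lookup helper shared by list_commands and get_command_invocation; raises + # InvalidCommandId when no stored command is found. + 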
def get_command_by_id(self, id): + command = next( + (command for command in self._commands if command.command_id == id), None) + + if command is None: + raise RESTError('InvalidCommandId', 'Invalid command id.') + + return command + + def get_commands_by_instance_id(self, instance_id): + return [ + command for command in self._commands + if instance_id in command.instance_ids] + + def get_command_invocation(self, **kwargs): + """ + https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetCommandInvocation.html + """ + + command_id = kwargs.get('CommandId') + instance_id = kwargs.get('InstanceId') + plugin_name = kwargs.get('PluginName', None) + + command = self.get_command_by_id(command_id) + return command.get_invocation(instance_id, plugin_name) + ssm_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 3227839b9..c47d4127a 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -85,9 +85,10 @@ class SimpleSystemManagerResponse(BaseResponse): path = self._get_param('Path') with_decryption = self._get_param('WithDecryption') recursive = self._get_param('Recursive', False) + filters = self._get_param('ParameterFilters') result = self.ssm_backend.get_parameters_by_path( - path, with_decryption, recursive + path, with_decryption, recursive, filters ) response = { @@ -117,7 +118,7 @@ class SimpleSystemManagerResponse(BaseResponse): end = token + page_size for parameter in result[token:]: - param_data = parameter.response_object(False) + param_data = parameter.describe_response_object(False) add = False if filters: @@ -159,12 +160,22 @@ class SimpleSystemManagerResponse(BaseResponse): description = self._get_param('Description') value = self._get_param('Value') type_ = self._get_param('Type') + allowed_pattern = self._get_param('AllowedPattern') keyid = self._get_param('KeyId') overwrite = self._get_param('Overwrite', False) - self.ssm_backend.put_parameter( - name, description, value, type_, keyid, overwrite) - return json.dumps({}) + result = self.ssm_backend.put_parameter( + name, description, value, type_, allowed_pattern, keyid, overwrite) + + if result is None: + error = { + '__type': 'ParameterAlreadyExists', + 'message': 'Parameter {0} already exists.'.format(name) + } + return json.dumps(error), dict(status=400) + + response = {'Version': result} + return json.dumps(response) def add_tags_to_resource(self): resource_id = self._get_param('ResourceId') @@ -190,3 +201,18 @@ class SimpleSystemManagerResponse(BaseResponse): tag_list = [{'Key': k, 'Value': v} for (k, v) in tags.items()] response = {'TagList': tag_list} return json.dumps(response) + + def send_command(self): + return json.dumps( + self.ssm_backend.send_command(**self.request_params) + ) + + def list_commands(self): + return json.dumps( + self.ssm_backend.list_commands(**self.request_params) + ) + + def get_command_invocation(self): + return json.dumps( + self.ssm_backend.get_command_invocation(**self.request_params) + ) diff --git a/moto/ssm/urls.py b/moto/ssm/urls.py index d22866486..9ac327325 100644 --- a/moto/ssm/urls.py +++ b/moto/ssm/urls.py @@ -3,6 +3,7 @@ from .responses import SimpleSystemManagerResponse url_bases = [ "https?://ssm.(.+).amazonaws.com", + "https?://ssm.(.+).amazonaws.com.cn", ] url_paths = { diff --git a/moto/sts/models.py b/moto/sts/models.py index c7163a335..9ad87358f 100644 --- a/moto/sts/models.py +++ b/moto/sts/models.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import datetime from 
moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds +from moto.sts.utils import random_access_key_id, random_secret_access_key, random_session_token class Token(BaseModel): @@ -26,6 +27,9 @@ class AssumedRole(BaseModel): now = datetime.datetime.utcnow() self.expiration = now + datetime.timedelta(seconds=duration) self.external_id = external_id + self.access_key_id = "ASIA" + random_access_key_id() + self.secret_access_key = random_secret_access_key() + self.session_token = random_session_token() @property def expiration_ISO8601(self): @@ -46,5 +50,8 @@ class STSBackend(BaseBackend): role = AssumedRole(**kwargs) return role + def assume_role_with_web_identity(self, **kwargs): + return self.assume_role(**kwargs) + sts_backend = STSBackend() diff --git a/moto/sts/responses.py b/moto/sts/responses.py index a5abb6b81..fd71a963f 100644 --- a/moto/sts/responses.py +++ b/moto/sts/responses.py @@ -39,6 +39,24 @@ class TokenResponse(BaseResponse): template = self.response_template(ASSUME_ROLE_RESPONSE) return template.render(role=role) + def assume_role_with_web_identity(self): + role_session_name = self.querystring.get('RoleSessionName')[0] + role_arn = self.querystring.get('RoleArn')[0] + + policy = self.querystring.get('Policy', [None])[0] + duration = int(self.querystring.get('DurationSeconds', [3600])[0]) + external_id = self.querystring.get('ExternalId', [None])[0] + + role = sts_backend.assume_role_with_web_identity( + role_session_name=role_session_name, + role_arn=role_arn, + policy=policy, + duration=duration, + external_id=external_id, + ) + template = self.response_template(ASSUME_ROLE_WITH_WEB_IDENTITY_RESPONSE) + return template.render(role=role) + def get_caller_identity(self): template = self.response_template(GET_CALLER_IDENTITY_RESPONSE) return template.render() @@ -84,10 +102,10 @@ ASSUME_ROLE_RESPONSE = """ - BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE - aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY + {{ role.session_token }} + {{ role.secret_access_key }} {{ role.expiration_ISO8601 }} - AKIAIOSFODNN7EXAMPLE + {{ role.access_key_id }} {{ role.arn }} @@ -100,6 +118,27 @@ ASSUME_ROLE_RESPONSE = """ + + + {{ role.session_token }} + {{ role.secret_access_key }} + {{ role.expiration_ISO8601 }} + {{ role.access_key_id }} + + + {{ role.arn }} + ARO123EXAMPLE123:{{ role.session_name }} + + 6 + + + c6104cbe-af31-11e0-8154-cbc7ccf896c7 + +""" + + GET_CALLER_IDENTITY_RESPONSE = """ arn:aws:sts::123456789012:user/moto diff --git a/moto/sts/utils.py b/moto/sts/utils.py new file mode 100644 index 000000000..8e6129728 --- /dev/null +++ b/moto/sts/utils.py @@ -0,0 +1,25 @@ +import base64 +import os +import random +import string + +import six + +ACCOUNT_SPECIFIC_ACCESS_KEY_PREFIX = "8NWMTLYQ" +SESSION_TOKEN_PREFIX = "FQoGZXIvYXdzEBYaD" + + +def random_access_key_id(): + return ACCOUNT_SPECIFIC_ACCESS_KEY_PREFIX + ''.join(six.text_type( + random.choice( + string.ascii_uppercase + string.digits + )) for _ in range(8) + ) + + +def random_secret_access_key(): + return base64.b64encode(os.urandom(30)).decode() + + +def random_session_token(): + return SESSION_TOKEN_PREFIX + base64.b64encode(os.urandom(266))[len(SESSION_TOKEN_PREFIX):].decode() diff --git a/moto/swf/models/__init__.py b/moto/swf/models/__init__.py index 833596a23..a8bc57f40 100644 --- 
a/moto/swf/models/__init__.py +++ b/moto/swf/models/__init__.py @@ -21,7 +21,7 @@ from .history_event import HistoryEvent # flake8: noqa from .timeout import Timeout # flake8: noqa from .workflow_type import WorkflowType # flake8: noqa from .workflow_execution import WorkflowExecution # flake8: noqa - +from time import sleep KNOWN_SWF_TYPES = { "activity": ActivityType, @@ -198,6 +198,9 @@ class SWFBackend(BaseBackend): wfe.start_decision_task(task.task_token, identity=identity) return task else: + # Sleeping here prevents clients that rely on the timeout from + # entering a busy-wait loop. + sleep(1) return None def count_pending_decision_tasks(self, domain_name, task_list): @@ -293,6 +296,9 @@ class SWFBackend(BaseBackend): wfe.start_activity_task(task.task_token, identity=identity) return task else: + # Sleeping here prevents clients that rely on the timeout from + # entering a busy-wait loop. + sleep(1) return None def count_pending_activity_tasks(self, domain_name, task_list): @@ -379,6 +385,14 @@ class SWFBackend(BaseBackend): if details: activity_task.details = details + def signal_workflow_execution(self, domain_name, signal_name, workflow_id, input=None, run_id=None): + # process timeouts on all objects + self._process_timeouts() + domain = self._get_domain(domain_name) + wfe = domain.get_workflow_execution( + workflow_id, run_id=run_id, raise_if_closed=True) + wfe.signal(signal_name, input) + swf_backends = {} for region in boto.swf.regions(): diff --git a/moto/swf/models/history_event.py index 0dc21a09a..e7ddfd924 100644 --- a/moto/swf/models/history_event.py +++ b/moto/swf/models/history_event.py @@ -25,6 +25,7 @@ SUPPORTED_HISTORY_EVENT_TYPES = ( "ActivityTaskTimedOut", "DecisionTaskTimedOut", "WorkflowExecutionTimedOut", + "WorkflowExecutionSignaled" ) diff --git a/moto/swf/models/workflow_execution.py index 2f41c287f..3d01f9192 100644 --- a/moto/swf/models/workflow_execution.py +++ b/moto/swf/models/workflow_execution.py @@ -599,6 +599,14 @@ class WorkflowExecution(BaseModel): self.close_status = "TERMINATED" self.close_cause = "OPERATOR_INITIATED" + def signal(self, signal_name, input): + self._add_event( + "WorkflowExecutionSignaled", + signal_name=signal_name, + input=input, + ) + self.schedule_decision_task() + def first_timeout(self): if not self.open or not self.start_timestamp: return None diff --git a/moto/swf/responses.py index 1ee89bfc1..6f002d3d4 100644 --- a/moto/swf/responses.py +++ b/moto/swf/responses.py @@ -326,9 +326,9 @@ class SWFResponse(BaseResponse): _workflow_type = self._params["workflowType"] workflow_name = _workflow_type["name"] workflow_version = _workflow_type["version"] - _default_task_list = self._params.get("defaultTaskList") - if _default_task_list: - task_list = _default_task_list.get("name") + _task_list = self._params.get("taskList") + if _task_list: + task_list = _task_list.get("name") else: task_list = None child_policy = self._params.get("childPolicy") @@ -507,3 +507,20 @@ class SWFResponse(BaseResponse): ) # TODO: make it dynamic when we implement activity tasks cancellation return json.dumps({"cancelRequested": False}) + + def signal_workflow_execution(self): + domain_name = self._params["domain"] + signal_name = self._params["signalName"] + workflow_id = self._params["workflowId"] + _input = self._params["input"] + run_id = self._params["runId"] + + self._check_string(domain_name) + self._check_string(signal_name) +
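+        # All five fields are validated before the backend call below; the
+        # backend then records a WorkflowExecutionSignaled event and
+        # schedules a fresh decision task (see WorkflowExecution.signal above).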
self._check_string(workflow_id) + self._check_none_or_string(_input) + self._check_none_or_string(run_id) + + self.swf_backend.signal_workflow_execution( + domain_name, signal_name, workflow_id, _input, run_id) + return "" diff --git a/moto/xray/mock_client.py index 6e2164d63..135796054 100644 --- a/moto/xray/mock_client.py +++ b/moto/xray/mock_client.py @@ -51,7 +51,7 @@ def mock_xray_client(f): aws_xray_sdk.core.xray_recorder._emitter = MockEmitter() try: - f(*args, **kwargs) + return f(*args, **kwargs) finally: if old_xray_context_var is None: diff --git a/other_langs/sqsSample.scala new file mode 100644 index 000000000..f83daaa22 --- /dev/null +++ b/other_langs/sqsSample.scala @@ -0,0 +1,25 @@ +package com.amazonaws.examples + +import com.amazonaws.client.builder.AwsClientBuilder +import com.amazonaws.regions.{Region, Regions} +import com.amazonaws.services.sqs.AmazonSQSClientBuilder + +import scala.jdk.CollectionConverters._ + +object QueueTest extends App { + val region = Region.getRegion(Regions.US_WEST_2).getName + val serviceEndpoint = "http://localhost:5000" + + val amazonSqs = AmazonSQSClientBuilder.standard() + .withEndpointConfiguration( + new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, region)) + .build + + val queueName = "my-first-queue" + amazonSqs.createQueue(queueName) + + val urls = amazonSqs.listQueues().getQueueUrls.asScala + println("Listing queues") + println(urls.map(url => s" - $url").mkString(System.lineSeparator)) + println() +} diff --git a/requirements-dev.txt index cdd88ab2f..f87ab3db6 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,17 +1,17 @@ -r requirements.txt mock nose -sure==1.2.24 +sure==1.4.11 coverage -flake8==3.4.1 +flake8==3.5.0 freezegun flask boto>=2.45.0 boto3>=1.4.4 -botocore>=1.5.77 +botocore>=1.12.13 six>=1.9 prompt-toolkit==1.0.14 click==6.7 inflection==0.3.1 -lxml==4.0.0 +lxml==4.2.3 beautifulsoup4==4.6.0 diff --git a/scripts/bump_version new file mode 100755 index 000000000..d1af3a84b --- /dev/null +++ b/scripts/bump_version @@ -0,0 +1,27 @@ +#!/bin/bash + +main() { + set -euo pipefail # Bash safemode + + local version="${1:-}" # default to empty so `set -u` doesn't abort before the usage check + if [[ -z "${version}" ]]; then + echo "USAGE: $0 1.3.2" + echo "Provide a new version number as an argument to bump the version" + echo -n "Current:" + grep version= setup.py + return 1 + fi + + &>/dev/null which bumpversion || pip install bumpversion + bumpversion --new-version ${version} patch + + git checkout -b version-${version} + # Commit the new version + git commit -a -m "bumping to version ${version}" + # Commit an updated IMPLEMENTATION_COVERAGE.md + make implementation_coverage || true + # Open a PR + open https://github.com/spulec/moto/compare/master...version-${version} +} + +main "$@" diff --git a/scripts/get_amis.py index 81f69c5dd..687dab2d4 100644 --- a/scripts/get_amis.py +++ b/scripts/get_amis.py @@ -1,7 +1,7 @@ import boto3 import json -# Taken from free tear list when creating an instance +# Taken from free tier list when creating an instance instances = [ 'ami-760aaa0f', 'ami-bb9a6bc2', 'ami-35e92e4c', 'ami-785db401', 'ami-b7e93bce', 'ami-dca37ea5', 'ami-999844e0', 'ami-9b32e8e2', 'ami-f8e54081', 'ami-bceb39c5', 'ami-03cf127a', 'ami-1ecc1e67', 'ami-c2ff2dbb', 'ami-12c6146b', diff --git a/scripts/implementation_coverage.py index 245784cb0..4e385e1d6 100755 --- a/scripts/implementation_coverage.py +++
b/scripts/implementation_coverage.py @@ -1,17 +1,22 @@ #!/usr/bin/env python import moto +import os from botocore import xform_name from botocore.session import Session import boto3 +script_dir = os.path.dirname(os.path.abspath(__file__)) + + def get_moto_implementation(service_name): - if not hasattr(moto, service_name): + service_name_standardized = service_name.replace("-", "") if "-" in service_name else service_name + if not hasattr(moto, service_name_standardized): return None - module = getattr(moto, service_name) + module = getattr(moto, service_name_standardized) if module is None: return None - mock = getattr(module, "mock_{}".format(service_name)) + mock = getattr(module, "mock_{}".format(service_name_standardized)) if mock is None: return None backends = list(mock().backends.values()) @@ -42,9 +47,8 @@ def calculate_implementation_coverage(): return coverage -def print_implementation_coverage(): - coverage = calculate_implementation_coverage() - for service_name in coverage: +def print_implementation_coverage(coverage): + for service_name in sorted(coverage): implemented = coverage.get(service_name)['implemented'] not_implemented = coverage.get(service_name)['not_implemented'] operations = sorted(implemented + not_implemented) @@ -65,5 +69,39 @@ def print_implementation_coverage(): print("- [ ] {}".format(op)) +def write_implementation_coverage_to_file(coverage): + implementation_coverage_file = "{}/../IMPLEMENTATION_COVERAGE.md".format(script_dir) + # remove any stale copy first; opening with "a+" below would otherwise append to it + try: + os.remove(implementation_coverage_file) + except OSError: + pass + + # rewrite the implementation coverage file with updated values + print("Writing to {}".format(implementation_coverage_file)) + with open(implementation_coverage_file, "a+") as file: + for service_name in sorted(coverage): + implemented = coverage.get(service_name)['implemented'] + not_implemented = coverage.get(service_name)['not_implemented'] + operations = sorted(implemented + not_implemented) + + if implemented and not_implemented: + percentage_implemented = int(100.0 * len(implemented) / (len(implemented) + len(not_implemented))) + elif implemented: + percentage_implemented = 100 + else: + percentage_implemented = 0 + + file.write("\n") + file.write("## {} - {}% implemented\n".format(service_name, percentage_implemented)) + for op in operations: + if op in implemented: + file.write("- [X] {}\n".format(op)) + else: + file.write("- [ ] {}\n".format(op)) + + if __name__ == '__main__': - print_implementation_coverage() + cov = calculate_implementation_coverage() + write_implementation_coverage_to_file(cov) + print_implementation_coverage(cov) diff --git a/scripts/update_managed_policies.py index 5b60660f6..de7058fd7 100755 --- a/scripts/update_managed_policies.py +++ b/scripts/update_managed_policies.py @@ -48,7 +48,8 @@ for policy_name in policies: PolicyArn=policies[policy_name]['Arn'], VersionId=policies[policy_name]['DefaultVersionId']) for key in response['PolicyVersion']: - policies[policy_name][key] = response['PolicyVersion'][key] + if key != "CreateDate": # the policy's CreateDate should not be overwritten by its version's CreateDate + policies[policy_name][key] = response['PolicyVersion'][key] with open(output_file, 'w') as f: triple_quote = '\"\"\"' diff --git a/setup.cfg index 3c6e79cf3..fb04c16a8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,8 @@ +[nosetests] +verbosity=1 +detailed-errors=1 +with-coverage=1 +cover-package=moto +
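+# With the [nosetests] section above, a bare `nosetests` run (and `make test`)
+# defaults to verbose output and reports coverage for the moto package.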
[bdist_wheel] universal=1 diff --git a/setup.py b/setup.py index fdd5b5a48..3e14101d6 100755 --- a/setup.py +++ b/setup.py @@ -1,28 +1,54 @@ #!/usr/bin/env python from __future__ import unicode_literals +import codecs +import os +import re import setuptools from setuptools import setup, find_packages import sys +# Borrowed from pip at https://github.com/pypa/pip/blob/62c27dee45625e1b63d1e023b0656310f276e050/setup.py#L11-L15 +here = os.path.abspath(os.path.dirname(__file__)) + +def read(*parts): + # intentionally *not* adding an encoding option to open, See: + # https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690 + with codecs.open(os.path.join(here, *parts), 'r') as fp: + return fp.read() + + +def get_version(): + version_file = read('moto', '__init__.py') + version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', + version_file, re.MULTILINE) + if version_match: + return version_match.group(1) + raise RuntimeError('Unable to find version string.') + + install_requires = [ - "Jinja2>=2.8", + "Jinja2>=2.10.1", "boto>=2.36.0", - "boto3>=1.2.1", - "botocore>=1.7.12", - "cookies", - "cryptography>=2.0.0", + "boto3>=1.9.86", + "botocore>=1.12.86", + "cryptography>=2.3.0", "requests>=2.5", "xmltodict", "six>1.9", "werkzeug", - "pyaml", + "PyYAML>=5.1", "pytz", "python-dateutil<3.0.0,>=2.1", + "python-jose<4.0.0", "mock", "docker>=2.5.1", - "jsondiff==1.1.1", - "aws-xray-sdk>=0.93", + "jsondiff==1.1.2", + "aws-xray-sdk!=0.96,>=0.93", + "responses>=0.9.0", + "idna<2.9,>=2.5", + "cfn-lint", + "sshpubkeys>=3.1.0,<4.0" ] extras_require = { @@ -39,9 +65,11 @@ else: setup( name='moto', - version='1.1.24', + version=get_version(), description='A library that allows your python tests to easily' ' mock out the boto library', + long_description=read('README.md'), + long_description_content_type='text/markdown', author='Steve Pulec', author_email='spulec@gmail.com', url='https://github.com/spulec/moto', @@ -60,10 +88,9 @@ setup( "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.3", - "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", "License :: OSI Approved :: Apache Software License", "Topic :: Software Development :: Testing", ], diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 11230658b..5954de8ca 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -1,15 +1,13 @@ from __future__ import unicode_literals -from datetime import datetime -from dateutil.tz import tzutc import boto3 from freezegun import freeze_time import requests import sure # noqa from botocore.exceptions import ClientError -from moto.packages.responses import responses +import responses from moto import mock_apigateway, settings @@ -965,3 +963,157 @@ def test_http_proxying_integration(): if not settings.TEST_SERVER_MODE: requests.get(deploy_url).content.should.equal(b"a fake response") + + +@mock_apigateway +def test_api_keys(): + region_name = 'us-west-2' + client = boto3.client('apigateway', region_name=region_name) + response = client.get_api_keys() + len(response['items']).should.equal(0) + + apikey_value = '12345' + apikey_name = 'TESTKEY1' + payload = {'value': apikey_value, 'name': apikey_name} + response = client.create_api_key(**payload) + apikey = 
client.get_api_key(apiKey=response['id']) + apikey['name'].should.equal(apikey_name) + apikey['value'].should.equal(apikey_value) + + apikey_name = 'TESTKEY2' + payload = {'name': apikey_name } + response = client.create_api_key(**payload) + apikey_id = response['id'] + apikey = client.get_api_key(apiKey=apikey_id) + apikey['name'].should.equal(apikey_name) + len(apikey['value']).should.equal(40) + + response = client.get_api_keys() + len(response['items']).should.equal(2) + + client.delete_api_key(apiKey=apikey_id) + + response = client.get_api_keys() + len(response['items']).should.equal(1) + +@mock_apigateway +def test_usage_plans(): + region_name = 'us-west-2' + client = boto3.client('apigateway', region_name=region_name) + response = client.get_usage_plans() + len(response['items']).should.equal(0) + + usage_plan_name = 'TEST-PLAN' + payload = {'name': usage_plan_name} + response = client.create_usage_plan(**payload) + usage_plan = client.get_usage_plan(usagePlanId=response['id']) + usage_plan['name'].should.equal(usage_plan_name) + usage_plan['apiStages'].should.equal([]) + + usage_plan_name = 'TEST-PLAN-2' + usage_plan_description = 'Description' + usage_plan_quota = {'limit': 10, 'period': 'DAY', 'offset': 0} + usage_plan_throttle = {'rateLimit': 2, 'burstLimit': 1} + usage_plan_api_stages = [{'apiId': 'foo', 'stage': 'bar'}] + payload = {'name': usage_plan_name, 'description': usage_plan_description, 'quota': usage_plan_quota, 'throttle': usage_plan_throttle, 'apiStages': usage_plan_api_stages} + response = client.create_usage_plan(**payload) + usage_plan_id = response['id'] + usage_plan = client.get_usage_plan(usagePlanId=usage_plan_id) + usage_plan['name'].should.equal(usage_plan_name) + usage_plan['description'].should.equal(usage_plan_description) + usage_plan['apiStages'].should.equal(usage_plan_api_stages) + usage_plan['throttle'].should.equal(usage_plan_throttle) + usage_plan['quota'].should.equal(usage_plan_quota) + + response = client.get_usage_plans() + len(response['items']).should.equal(2) + + client.delete_usage_plan(usagePlanId=usage_plan_id) + + response = client.get_usage_plans() + len(response['items']).should.equal(1) + +@mock_apigateway +def test_usage_plan_keys(): + region_name = 'us-west-2' + usage_plan_id = 'test_usage_plan_id' + client = boto3.client('apigateway', region_name=region_name) + usage_plan_id = "test" + + # Create an API key so we can use it + key_name = 'test-api-key' + response = client.create_api_key(name=key_name) + key_id = response["id"] + key_value = response["value"] + + # Get current plan keys (expect none) + response = client.get_usage_plan_keys(usagePlanId=usage_plan_id) + len(response['items']).should.equal(0) + + # Create usage plan key + key_type = 'API_KEY' + payload = {'usagePlanId': usage_plan_id, 'keyId': key_id, 'keyType': key_type } + response = client.create_usage_plan_key(**payload) + usage_plan_key_id = response["id"] + + # Get current plan keys (expect 1) + response = client.get_usage_plan_keys(usagePlanId=usage_plan_id) + len(response['items']).should.equal(1) + + # Get a single usage plan key and check it matches the created one + usage_plan_key = client.get_usage_plan_key(usagePlanId=usage_plan_id, keyId=usage_plan_key_id) + usage_plan_key['name'].should.equal(key_name) + usage_plan_key['id'].should.equal(key_id) + usage_plan_key['type'].should.equal(key_type) + usage_plan_key['value'].should.equal(key_value) + + # Delete usage plan key + client.delete_usage_plan_key(usagePlanId=usage_plan_id, keyId=key_id) + + # Get 
current plan keys (expect none) + response = client.get_usage_plan_keys(usagePlanId=usage_plan_id) + len(response['items']).should.equal(0) + +@mock_apigateway +def test_create_usage_plan_key_non_existent_api_key(): + region_name = 'us-west-2' + usage_plan_id = 'test_usage_plan_id' + client = boto3.client('apigateway', region_name=region_name) + usage_plan_id = "test" + + # Attempt to create a usage plan key for an API key that doesn't exist + payload = {'usagePlanId': usage_plan_id, 'keyId': 'non-existent', 'keyType': 'API_KEY' } + client.create_usage_plan_key.when.called_with(**payload).should.throw(ClientError) + + +@mock_apigateway +def test_get_usage_plans_using_key_id(): + region_name = 'us-west-2' + client = boto3.client('apigateway', region_name=region_name) + + # Create 2 Usage Plans + # one will be attached to an API Key, the other will remain unattached + attached_plan = client.create_usage_plan(name='Attached') + unattached_plan = client.create_usage_plan(name='Unattached') + + # Create an API key + # to attach to the usage plan + key_name = 'test-api-key' + response = client.create_api_key(name=key_name) + key_id = response["id"] + + # Create a Usage Plan Key + # Attach the Usage Plan to the API Key + key_type = 'API_KEY' + payload = {'usagePlanId': attached_plan['id'], 'keyId': key_id, 'keyType': key_type} + response = client.create_usage_plan_key(**payload) + + # All usage plans should be returned when keyId is not included + all_plans = client.get_usage_plans() + len(all_plans['items']).should.equal(2) + + # Only the usage plan attached to the given API key is included + only_plans_with_key = client.get_usage_plans(keyId=key_id) + len(only_plans_with_key['items']).should.equal(1) + only_plans_with_key['items'][0]['name'].should.equal(attached_plan['name']) + only_plans_with_key['items'][0]['id'].should.equal(attached_plan['id']) diff --git a/tests/test_apigateway/test_server.py index f2a29e253..953d942cc 100644 --- a/tests/test_apigateway/test_server.py +++ b/tests/test_apigateway/test_server.py @@ -1,5 +1,6 @@ from __future__ import unicode_literals import sure # noqa +import json import moto.server as server @@ -9,8 +10,82 @@ Test the different server responses def test_list_apis(): - backend = server.create_backend_app("apigateway") + backend = server.create_backend_app('apigateway') test_client = backend.test_client() res = test_client.get('/restapis') res.data.should.equal(b'{"item": []}') + +def test_usage_plans_apis(): + backend = server.create_backend_app('apigateway') + test_client = backend.test_client() + + # List usage plans (expect empty) + res = test_client.get('/usageplans') + json.loads(res.data)["item"].should.have.length_of(0) + + # Create usage plan + res = test_client.post('/usageplans', data=json.dumps({'name': 'test'})) + created_plan = json.loads(res.data) + created_plan['name'].should.equal('test') + + # List usage plans (expect 1 plan) + res = test_client.get('/usageplans') + json.loads(res.data)["item"].should.have.length_of(1) + + # Get single usage plan + res = test_client.get('/usageplans/{0}'.format(created_plan["id"])) + fetched_plan = json.loads(res.data) + fetched_plan.should.equal(created_plan) + + # Delete usage plan + res = test_client.delete('/usageplans/{0}'.format(created_plan["id"])) + res.data.should.equal(b'{}') + + # List usage plans (expect empty again) + res = test_client.get('/usageplans') + json.loads(res.data)["item"].should.have.length_of(0) + +def test_usage_plans_keys(): + backend =
server.create_backend_app('apigateway') + test_client = backend.test_client() + usage_plan_id = 'test_plan_id' + + # Create API key to be used in tests + res = test_client.post('/apikeys', data=json.dumps({'name': 'test'})) + created_api_key = json.loads(res.data) + + # List usage plans keys (expect empty) + res = test_client.get('/usageplans/{0}/keys'.format(usage_plan_id)) + json.loads(res.data)["item"].should.have.length_of(0) + + # Create usage plan key + res = test_client.post('/usageplans/{0}/keys'.format(usage_plan_id), data=json.dumps({'keyId': created_api_key["id"], 'keyType': 'API_KEY'})) + created_usage_plan_key = json.loads(res.data) + + # List usage plans keys (expect 1 key) + res = test_client.get('/usageplans/{0}/keys'.format(usage_plan_id)) + json.loads(res.data)["item"].should.have.length_of(1) + + # Get single usage plan key + res = test_client.get('/usageplans/{0}/keys/{1}'.format(usage_plan_id, created_api_key["id"])) + fetched_plan_key = json.loads(res.data) + fetched_plan_key.should.equal(created_usage_plan_key) + + # Delete usage plan key + res = test_client.delete('/usageplans/{0}/keys/{1}'.format(usage_plan_id, created_api_key["id"])) + res.data.should.equal(b'{}') + + # List usage plans keys (expect to be empty again) + res = test_client.get('/usageplans/{0}/keys'.format(usage_plan_id)) + json.loads(res.data)["item"].should.have.length_of(0) + +def test_create_usage_plans_key_non_existent_api_key(): + backend = server.create_backend_app('apigateway') + test_client = backend.test_client() + usage_plan_id = 'test_plan_id' + + # Create usage plan key with non-existent api key + res = test_client.post('/usageplans/{0}/keys'.format(usage_plan_id), data=json.dumps({'keyId': 'non-existent', 'keyType': 'API_KEY'})) + res.status_code.should.equal(404) + diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 5ed6c3aa5..2df7bf30f 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -7,14 +7,19 @@ from boto.ec2.autoscale.group import AutoScalingGroup from boto.ec2.autoscale import Tag import boto.ec2.elb import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_elb, mock_autoscaling_deprecated, mock_ec2 from tests.helpers import requires_boto_gte +from utils import setup_networking, setup_networking_deprecated, setup_instance_with_networking + @mock_autoscaling_deprecated @mock_elb_deprecated def test_create_autoscaling_group(): + mocked_networking = setup_networking_deprecated() elb_conn = boto.ec2.elb.connect_to_region('us-east-1') elb_conn.create_load_balancer( 'test_lb', zones=[], listeners=[(80, 8080, 'http')]) @@ -29,7 +34,7 @@ def test_create_autoscaling_group(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a', 'us-east-1b'], default_cooldown=60, desired_capacity=2, health_check_period=100, @@ -39,7 +44,10 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["test_lb"], placement_group="test_placement", - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier="{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + ), termination_policies=["OldestInstance", "NewestInstance"], tags=[Tag( resource_id='tester_group', @@ -54,12 +62,15 @@ def test_create_autoscaling_group(): 
group = conn.get_all_groups()[0] group.name.should.equal('tester_group') set(group.availability_zones).should.equal( - set(['us-east-1c', 'us-east-1b'])) + set(['us-east-1a', 'us-east-1b'])) group.desired_capacity.should.equal(2) group.max_size.should.equal(2) group.min_size.should.equal(2) group.instances.should.have.length_of(2) - group.vpc_zone_identifier.should.equal('subnet-1234abcd') + group.vpc_zone_identifier.should.equal("{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + )) group.launch_config_name.should.equal('tester') group.default_cooldown.should.equal(60) group.health_check_period.should.equal(100) @@ -80,6 +91,8 @@ def test_create_autoscaling_group(): def test_create_autoscaling_groups_defaults(): """ Test with the minimum inputs and check that all of the proper defaults are assigned for the other attributes """ + + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -93,6 +106,7 @@ def test_create_autoscaling_groups_defaults(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -103,9 +117,9 @@ def test_create_autoscaling_groups_defaults(): group.launch_config_name.should.equal('tester') # Defaults - list(group.availability_zones).should.equal([]) + list(group.availability_zones).should.equal(['us-east-1a']) # subnet1 group.desired_capacity.should.equal(2) - group.vpc_zone_identifier.should.equal('') + group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) group.default_cooldown.should.equal(300) group.health_check_period.should.equal(300) group.health_check_type.should.equal("EC2") @@ -117,6 +131,7 @@ def test_create_autoscaling_groups_defaults(): @mock_autoscaling def test_list_many_autoscaling_groups(): + mocked_networking = setup_networking() conn = boto3.client('autoscaling', region_name='us-east-1') conn.create_launch_configuration(LaunchConfigurationName='TestLC') @@ -124,7 +139,8 @@ def test_list_many_autoscaling_groups(): conn.create_auto_scaling_group(AutoScalingGroupName='TestGroup%d' % i, MinSize=1, MaxSize=2, - LaunchConfigurationName='TestLC') + LaunchConfigurationName='TestLC', + VPCZoneIdentifier=mocked_networking['subnet1']) response = conn.describe_auto_scaling_groups() groups = response["AutoScalingGroups"] @@ -142,6 +158,7 @@ def test_list_many_autoscaling_groups(): @mock_autoscaling @mock_ec2 def test_list_many_autoscaling_groups(): + mocked_networking = setup_networking() conn = boto3.client('autoscaling', region_name='us-east-1') conn.create_launch_configuration(LaunchConfigurationName='TestLC') @@ -155,7 +172,8 @@ def test_list_many_autoscaling_groups(): "PropagateAtLaunch": True, "Key": 'TestTagKey1', "Value": 'TestTagValue1' - }]) + }], + VPCZoneIdentifier=mocked_networking['subnet1']) ec2 = boto3.client('ec2', region_name='us-east-1') instances = ec2.describe_instances() @@ -167,6 +185,7 @@ def test_list_many_autoscaling_groups(): @mock_autoscaling_deprecated def test_autoscaling_group_describe_filter(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -180,6 +199,7 @@ def test_autoscaling_group_describe_filter(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) group.name = 'tester_group2' @@ -194,6 +214,7 @@ def 
test_autoscaling_group_describe_filter(): @mock_autoscaling_deprecated def test_autoscaling_update(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -204,27 +225,30 @@ def test_autoscaling_update(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], desired_capacity=2, max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] - group.vpc_zone_identifier.should.equal('subnet-1234abcd') + group.availability_zones.should.equal(['us-east-1a']) + group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) - group.vpc_zone_identifier = 'subnet-5678efgh' + group.availability_zones = ['us-east-1b'] + group.vpc_zone_identifier = mocked_networking['subnet2'] group.update() group = conn.get_all_groups()[0] - group.vpc_zone_identifier.should.equal('subnet-5678efgh') + group.availability_zones.should.equal(['us-east-1b']) + group.vpc_zone_identifier.should.equal(mocked_networking['subnet2']) @mock_autoscaling_deprecated def test_autoscaling_tags_update(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -235,18 +259,18 @@ def test_autoscaling_tags_update(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', tags=[Tag( resource_id='tester_group', key='test_key', value='test_value', propagate_at_launch=True )], + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -267,6 +291,7 @@ def test_autoscaling_tags_update(): @mock_autoscaling_deprecated def test_autoscaling_group_delete(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -280,6 +305,7 @@ def test_autoscaling_group_delete(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -292,7 +318,8 @@ def test_autoscaling_group_delete(): @mock_ec2_deprecated @mock_autoscaling_deprecated def test_autoscaling_group_describe_instances(): - conn = boto.connect_autoscale() + mocked_networking = setup_networking_deprecated() + conn = boto.ec2.autoscale.connect_to_region('us-east-1') config = LaunchConfiguration( name='tester', image_id='ami-abcd1234', @@ -305,6 +332,7 @@ def test_autoscaling_group_describe_instances(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -314,7 +342,7 @@ def test_autoscaling_group_describe_instances(): instances[0].health_status.should.equal('Healthy') autoscale_instance_ids = [instance.instance_id for instance in instances] - ec2_conn = boto.connect_ec2() + ec2_conn = boto.ec2.connect_to_region('us-east-1') reservations = ec2_conn.get_all_instances() instances = reservations[0].instances instances.should.have.length_of(2) @@ -326,6 +354,7 @@ def test_autoscaling_group_describe_instances(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_up(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = 
LaunchConfiguration( name='tester', @@ -336,12 +365,12 @@ def test_set_desired_capacity_up(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -361,6 +390,7 @@ def test_set_desired_capacity_up(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_down(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -371,12 +401,12 @@ def test_set_desired_capacity_down(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -396,6 +426,7 @@ def test_set_desired_capacity_down(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_the_same(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -406,12 +437,12 @@ def test_set_desired_capacity_the_same(): group = AutoScalingGroup( name='tester_group', - availability_zones=['us-east-1c', 'us-east-1b'], + availability_zones=['us-east-1a'], desired_capacity=2, max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -431,6 +462,7 @@ def test_set_desired_capacity_the_same(): @mock_autoscaling_deprecated @mock_elb_deprecated def test_autoscaling_group_with_elb(): + mocked_networking = setup_networking_deprecated() elb_conn = boto.connect_elb() zones = ['us-east-1a', 'us-east-1b'] ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -451,6 +483,7 @@ def test_autoscaling_group_with_elb(): min_size=2, launch_config=config, load_balancers=["my-lb"], + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] @@ -488,6 +521,7 @@ Boto3 @mock_autoscaling @mock_elb def test_describe_load_balancers(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 elb_client = boto3.client('elb', region_name='us-east-1') @@ -514,16 +548,19 @@ def test_describe_load_balancers(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + assert response['ResponseMetadata']['RequestId'] list(response['LoadBalancers']).should.have.length_of(1) response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') @mock_autoscaling @mock_elb def test_create_elb_and_autoscaling_group_no_relationship(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 ELB_NAME = 'my-elb' @@ -546,6 +583,7 @@ def test_create_elb_and_autoscaling_group_no_relationship(): MinSize=0, MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, + VPCZoneIdentifier=mocked_networking['subnet1'], ) # autoscaling group and elb should have no relationship @@ -562,6 +600,7 @@ def test_create_elb_and_autoscaling_group_no_relationship(): @mock_autoscaling @mock_elb def 
test_attach_load_balancer(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 elb_client = boto3.client('elb', region_name='us-east-1') @@ -587,7 +626,8 @@ def test_attach_load_balancer(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.attach_load_balancers( @@ -609,6 +649,7 @@ def test_attach_load_balancer(): @mock_autoscaling @mock_elb def test_detach_load_balancer(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 elb_client = boto3.client('elb', region_name='us-east-1') @@ -635,7 +676,8 @@ def test_detach_load_balancer(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.detach_load_balancers( @@ -654,6 +696,7 @@ def test_detach_load_balancer(): @mock_autoscaling def test_create_autoscaling_group_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -676,59 +719,77 @@ def test_create_autoscaling_group_boto3(): 'Key': 'not-propogated-tag-key', 'Value': 'not-propogate-tag-value', 'PropagateAtLaunch': False - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=False, ) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) +@mock_autoscaling +def test_create_autoscaling_group_from_instance(): + autoscaling_group_name = 'test_asg' + image_id = 'ami-0cc293023f983ed53' + instance_type = 't2.micro' + + mocked_instance_with_networking = setup_instance_with_networking(image_id, instance_type) + client = boto3.client('autoscaling', region_name='us-east-1') + response = client.create_auto_scaling_group( + AutoScalingGroupName=autoscaling_group_name, + InstanceId=mocked_instance_with_networking['instance'], + MinSize=1, + MaxSize=3, + DesiredCapacity=2, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }, + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'not-propogated-tag-key', + 'Value': 'not-propogate-tag-value', + 'PropagateAtLaunch': False + }], + VPCZoneIdentifier=mocked_instance_with_networking['subnet1'], + NewInstancesProtectedFromScaleIn=False, + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + describe_launch_configurations_response = client.describe_launch_configurations() + describe_launch_configurations_response['LaunchConfigurations'].should.have.length_of(1) + launch_configuration_from_instance = describe_launch_configurations_response['LaunchConfigurations'][0] + launch_configuration_from_instance['LaunchConfigurationName'].should.equal('test_asg') + launch_configuration_from_instance['ImageId'].should.equal(image_id) + launch_configuration_from_instance['InstanceType'].should.equal(instance_type) + + +@mock_autoscaling +def test_create_autoscaling_group_from_invalid_instance_id(): + invalid_instance_id = 'invalid_instance' + + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + with assert_raises(ClientError) as ex: + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + InstanceId=invalid_instance_id, + MinSize=9, + MaxSize=15, + DesiredCapacity=12, + VPCZoneIdentifier=mocked_networking['subnet1'], + 
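+            # The sizes here are arbitrary; the bogus InstanceId should make
+            # the call fail with the ValidationError asserted below.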
NewInstancesProtectedFromScaleIn=False, + ) + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Code'].should.equal('ValidationError') + ex.exception.response['Error']['Message'].should.equal('Instance [{0}] is invalid.'.format(invalid_instance_id)) + + @mock_autoscaling def test_describe_autoscaling_groups_boto3(): - client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( - LaunchConfigurationName='test_launch_configuration' - ) - _ = client.create_auto_scaling_group( - AutoScalingGroupName='test_asg', - LaunchConfigurationName='test_launch_configuration', - MinSize=0, - MaxSize=20, - DesiredCapacity=5 - ) - response = client.describe_auto_scaling_groups( - AutoScalingGroupNames=["test_asg"] - ) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['AutoScalingGroups'][0][ - 'AutoScalingGroupName'].should.equal('test_asg') - - -@mock_autoscaling -def test_update_autoscaling_group_boto3(): - client = boto3.client('autoscaling', region_name='us-east-1') - _ = client.create_launch_configuration( - LaunchConfigurationName='test_launch_configuration' - ) - _ = client.create_auto_scaling_group( - AutoScalingGroupName='test_asg', - LaunchConfigurationName='test_launch_configuration', - MinSize=0, - MaxSize=20, - DesiredCapacity=5 - ) - - response = client.update_auto_scaling_group( - AutoScalingGroupName='test_asg', - MinSize=1, - ) - - response = client.describe_auto_scaling_groups( - AutoScalingGroupNames=["test_asg"] - ) - response['AutoScalingGroups'][0]['MinSize'].should.equal(1) - - -@mock_autoscaling -def test_autoscaling_taqs_update_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -739,14 +800,168 @@ def test_autoscaling_taqs_update_boto3(): MinSize=0, MaxSize=20, DesiredCapacity=5, - Tags=[ - { - "ResourceId": 'test_asg', - "Key": 'test_key', - "Value": 'test_value', - "PropagateAtLaunch": True - }, - ] + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, + ) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=["test_asg"] + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + group = response['AutoScalingGroups'][0] + group['AutoScalingGroupName'].should.equal('test_asg') + group['AvailabilityZones'].should.equal(['us-east-1a']) + group['VPCZoneIdentifier'].should.equal(mocked_networking['subnet1']) + group['NewInstancesProtectedFromScaleIn'].should.equal(True) + for instance in group['Instances']: + instance['AvailabilityZone'].should.equal('us-east-1a') + instance['ProtectedFromScaleIn'].should.equal(True) + + +@mock_autoscaling +def test_describe_autoscaling_instances_boto3(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, + ) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=["test_asg"] + ) + instance_ids = [ + instance['InstanceId'] + for instance in 
response['AutoScalingGroups'][0]['Instances'] + ] + + response = client.describe_auto_scaling_instances(InstanceIds=instance_ids) + for instance in response['AutoScalingInstances']: + instance['AutoScalingGroupName'].should.equal('test_asg') + instance['AvailabilityZone'].should.equal('us-east-1a') + instance['ProtectedFromScaleIn'].should.equal(True) + + +@mock_autoscaling +def test_update_autoscaling_group_boto3(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, + ) + + _ = client.update_auto_scaling_group( + AutoScalingGroupName='test_asg', + MinSize=1, + VPCZoneIdentifier="{subnet1},{subnet2}".format( + subnet1=mocked_networking['subnet1'], + subnet2=mocked_networking['subnet2'], + ), + NewInstancesProtectedFromScaleIn=False, + ) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=["test_asg"] + ) + group = response['AutoScalingGroups'][0] + group['MinSize'].should.equal(1) + set(group['AvailabilityZones']).should.equal({'us-east-1a', 'us-east-1b'}) + group['NewInstancesProtectedFromScaleIn'].should.equal(False) + + +@mock_autoscaling +def test_update_autoscaling_group_min_size_desired_capacity_change(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=2, + MaxSize=20, + DesiredCapacity=3, + VPCZoneIdentifier=mocked_networking['subnet1'], + ) + client.update_auto_scaling_group( + AutoScalingGroupName='test_asg', + MinSize=5, + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg']) + group = response['AutoScalingGroups'][0] + group['DesiredCapacity'].should.equal(5) + group['MinSize'].should.equal(5) + group['Instances'].should.have.length_of(5) + + +@mock_autoscaling +def test_update_autoscaling_group_max_size_desired_capacity_change(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=2, + MaxSize=20, + DesiredCapacity=10, + VPCZoneIdentifier=mocked_networking['subnet1'], + ) + client.update_auto_scaling_group( + AutoScalingGroupName='test_asg', + MaxSize=5, + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg']) + group = response['AutoScalingGroups'][0] + group['DesiredCapacity'].should.equal(5) + group['MaxSize'].should.equal(5) + group['Instances'].should.have.length_of(5) + + +@mock_autoscaling +def test_autoscaling_taqs_update_boto3(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + 
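+        # Seed the group with one tag so the create_or_update_tags call
+        # below has an existing tag set to work against.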
AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) client.create_or_update_tags(Tags=[{ @@ -769,6 +984,7 @@ def test_autoscaling_taqs_update_boto3(): @mock_autoscaling def test_autoscaling_describe_policies_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -784,7 +1000,8 @@ def test_autoscaling_describe_policies_boto3(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) client.put_scaling_policy( @@ -825,6 +1042,7 @@ def test_autoscaling_describe_policies_boto3(): @mock_autoscaling @mock_ec2 def test_detach_one_instance_decrement(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -835,13 +1053,14 @@ def test_detach_one_instance_decrement(): MinSize=0, MaxSize=2, DesiredCapacity=2, - Tags=[ - {'ResourceId': 'test_asg', - 'ResourceType': 'auto-scaling-group', - 'Key': 'propogated-tag-key', - 'Value': 'propogate-tag-value', - 'PropagateAtLaunch': True - }] + Tags=[{ + 'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test_asg'] @@ -878,6 +1097,7 @@ def test_detach_one_instance_decrement(): @mock_autoscaling @mock_ec2 def test_detach_one_instance(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -888,13 +1108,14 @@ def test_detach_one_instance(): MinSize=0, MaxSize=2, DesiredCapacity=2, - Tags=[ - {'ResourceId': 'test_asg', - 'ResourceType': 'auto-scaling-group', - 'Key': 'propogated-tag-key', - 'Value': 'propogate-tag-value', - 'PropagateAtLaunch': True - }] + Tags=[{ + 'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test_asg'] @@ -930,6 +1151,7 @@ def test_detach_one_instance(): @mock_autoscaling @mock_ec2 def test_attach_one_instance(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -940,16 +1162,15 @@ def test_attach_one_instance(): MinSize=0, MaxSize=4, DesiredCapacity=2, - Tags=[ - {'ResourceId': 'test_asg', - 'ResourceType': 'auto-scaling-group', - 'Key': 'propogated-tag-key', - 'Value': 'propogate-tag-value', - 'PropagateAtLaunch': True - }] - ) - response = client.describe_auto_scaling_groups( - AutoScalingGroupNames=['test_asg'] + Tags=[{ + 'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 
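+            # PropagateAtLaunch=True copies this tag onto instances the
+            # group launches.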
'PropagateAtLaunch': True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, ) ec2 = boto3.resource('ec2', 'us-east-1') @@ -964,11 +1185,16 @@ def test_attach_one_instance(): response = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test_asg'] ) - response['AutoScalingGroups'][0]['Instances'].should.have.length_of(3) + instances = response['AutoScalingGroups'][0]['Instances'] + instances.should.have.length_of(3) + for instance in instances: + instance['ProtectedFromScaleIn'].should.equal(True) + @mock_autoscaling @mock_ec2 def test_describe_instance_health(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -979,6 +1205,7 @@ def test_describe_instance_health(): MinSize=2, MaxSize=4, DesiredCapacity=2, + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( @@ -991,6 +1218,7 @@ def test_describe_instance_health(): @mock_autoscaling @mock_ec2 def test_set_instance_health(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -1001,6 +1229,7 @@ def test_set_instance_health(): MinSize=2, MaxSize=4, DesiredCapacity=2, + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( @@ -1018,3 +1247,177 @@ def test_set_instance_health(): instance1 = response['AutoScalingGroups'][0]['Instances'][0] instance1['HealthStatus'].should.equal('Unhealthy') + +@mock_autoscaling +def test_suspend_processes(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + client.create_launch_configuration( + LaunchConfigurationName='lc', + ) + client.create_auto_scaling_group( + LaunchConfigurationName='lc', + AutoScalingGroupName='test-asg', + MinSize=1, + MaxSize=1, + VPCZoneIdentifier=mocked_networking['subnet1'], + ) + + # When we suspend the 'Launch' process on the ASG client + client.suspend_processes( + AutoScalingGroupName='test-asg', + ScalingProcesses=['Launch'] + ) + + res = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test-asg'] + ) + + # The 'Launch' process should, in fact, be suspended + launch_suspended = False + for proc in res['AutoScalingGroups'][0]['SuspendedProcesses']: + if proc.get('ProcessName') == 'Launch': + launch_suspended = True + + assert launch_suspended is True + +@mock_autoscaling +def test_set_instance_protection(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=False, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + instance_ids = [ + instance['InstanceId'] + for instance in response['AutoScalingGroups'][0]['Instances'] + ] + protected = instance_ids[:3] + + _ = client.set_instance_protection( + AutoScalingGroupName='test_asg', + InstanceIds=protected, + ProtectedFromScaleIn=True, + ) + + response = 
client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + for instance in response['AutoScalingGroups'][0]['Instances']: + instance['ProtectedFromScaleIn'].should.equal( + instance['InstanceId'] in protected + ) + + +@mock_autoscaling +def test_set_desired_capacity_up_boto3(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, + ) + + _ = client.set_desired_capacity( + AutoScalingGroupName='test_asg', + DesiredCapacity=10, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + instances = response['AutoScalingGroups'][0]['Instances'] + instances.should.have.length_of(10) + for instance in instances: + instance['ProtectedFromScaleIn'].should.equal(True) + + +@mock_autoscaling +def test_set_desired_capacity_down_boto3(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=20, + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=True, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + instance_ids = [ + instance['InstanceId'] + for instance in response['AutoScalingGroups'][0]['Instances'] + ] + unprotected, protected = instance_ids[:2], instance_ids[2:] + + _ = client.set_instance_protection( + AutoScalingGroupName='test_asg', + InstanceIds=unprotected, + ProtectedFromScaleIn=False, + ) + + _ = client.set_desired_capacity( + AutoScalingGroupName='test_asg', + DesiredCapacity=1, + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + group = response['AutoScalingGroups'][0] + group['DesiredCapacity'].should.equal(1) + instance_ids = {instance['InstanceId'] for instance in group['Instances']} + set(protected).should.equal(instance_ids) + set(unprotected).should_not.be.within(instance_ids) # only unprotected killed + + +@mock_autoscaling +@mock_ec2 +def test_terminate_instance_in_autoscaling_group(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + _ = client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=1, + MaxSize=20, + VPCZoneIdentifier=mocked_networking['subnet1'], + NewInstancesProtectedFromScaleIn=False + ) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + original_instance_id = next( + instance['InstanceId'] + for instance in response['AutoScalingGroups'][0]['Instances'] + ) + ec2_client = boto3.client('ec2', region_name='us-east-1') + ec2_client.terminate_instances(InstanceIds=[original_instance_id]) + + response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['test_asg']) + replaced_instance_id = next( 
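+        # The group back-fills to MinSize, so a replacement instance
+        # should appear in place of the terminated one.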
+ instance['InstanceId'] + for instance in response['AutoScalingGroups'][0]['Instances'] + ) + replaced_instance_id.should_not.equal(original_instance_id) diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py index 89ec4a399..a142fd133 100644 --- a/tests/test_autoscaling/test_elbv2.py +++ b/tests/test_autoscaling/test_elbv2.py @@ -1,24 +1,24 @@ from __future__ import unicode_literals import boto3 +import sure # noqa from moto import mock_autoscaling, mock_ec2, mock_elbv2 +from utils import setup_networking + @mock_elbv2 -@mock_ec2 @mock_autoscaling def test_attach_detach_target_groups(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 client = boto3.client('autoscaling', region_name='us-east-1') elbv2_client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') response = elbv2_client.create_target_group( Name='a-target', Protocol='HTTP', Port=8080, - VpcId=vpc.id, + VpcId=mocked_networking['vpc'], HealthCheckProtocol='HTTP', HealthCheckPort='8080', HealthCheckPath='/', @@ -40,7 +40,7 @@ def test_attach_detach_target_groups(): MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, TargetGroupARNs=[target_group_arn], - VPCZoneIdentifier=vpc.id) + VPCZoneIdentifier=mocked_networking['subnet1']) # create asg without attaching to target group client.create_auto_scaling_group( AutoScalingGroupName='test_asg2', @@ -48,7 +48,7 @@ def test_attach_detach_target_groups(): MinSize=0, MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, - VPCZoneIdentifier=vpc.id) + VPCZoneIdentifier=mocked_networking['subnet2']) response = client.describe_load_balancer_target_groups( AutoScalingGroupName='test_asg') @@ -74,21 +74,18 @@ def test_attach_detach_target_groups(): list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT) @mock_elbv2 -@mock_ec2 @mock_autoscaling def test_detach_all_target_groups(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 client = boto3.client('autoscaling', region_name='us-east-1') elbv2_client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') response = elbv2_client.create_target_group( Name='a-target', Protocol='HTTP', Port=8080, - VpcId=vpc.id, + VpcId=mocked_networking['vpc'], HealthCheckProtocol='HTTP', HealthCheckPort='8080', HealthCheckPath='/', @@ -109,7 +106,7 @@ def test_detach_all_target_groups(): MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, TargetGroupARNs=[target_group_arn], - VPCZoneIdentifier=vpc.id) + VPCZoneIdentifier=mocked_networking['subnet1']) response = client.describe_load_balancer_target_groups( AutoScalingGroupName='test_asg') diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py index 54c64b749..e6b01163f 100644 --- a/tests/test_autoscaling/test_policies.py +++ b/tests/test_autoscaling/test_policies.py @@ -7,8 +7,11 @@ import sure # noqa from moto import mock_autoscaling_deprecated +from utils import setup_networking_deprecated + def setup_autoscale_group(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -22,6 +25,7 @@ def setup_autoscale_group(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) 
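
Condensed for reference, the attach/detach flow the ELBv2 tests above exercise reduces to the following sketch. It is a minimal illustration under moto's mocks; names such as 'sketch-asg' and the 2-instance capacity are illustrative, not taken from the patch.

import boto3
from moto import mock_autoscaling, mock_ec2, mock_elbv2

@mock_elbv2
@mock_ec2
@mock_autoscaling
def sketch_asg_targets_register():
    # Networking first: target groups and ASGs both need a VPC/subnet.
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='10.11.0.0/16')
    subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.11.1.0/24',
                               AvailabilityZone='us-east-1a')

    elbv2 = boto3.client('elbv2', region_name='us-east-1')
    tg_arn = elbv2.create_target_group(
        Name='sketch-target', Protocol='HTTP', Port=8080, VpcId=vpc.id,
        HealthCheckProtocol='HTTP', HealthCheckPort='8080', HealthCheckPath='/',
    )['TargetGroups'][0]['TargetGroupArn']

    asg = boto3.client('autoscaling', region_name='us-east-1')
    asg.create_launch_configuration(LaunchConfigurationName='sketch-lc')
    asg.create_auto_scaling_group(
        AutoScalingGroupName='sketch-asg',
        LaunchConfigurationName='sketch-lc',
        MinSize=0, MaxSize=2, DesiredCapacity=2,
        TargetGroupARNs=[tg_arn],
        VPCZoneIdentifier=subnet.id,
    )

    # Instances launched by the group register as targets automatically.
    health = elbv2.describe_target_health(TargetGroupArn=tg_arn)
    assert len(health['TargetHealthDescriptions']) == 2
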
conn.create_auto_scaling_group(group) return group diff --git a/tests/test_autoscaling/utils.py b/tests/test_autoscaling/utils.py new file mode 100644 index 000000000..dc38aba3d --- /dev/null +++ b/tests/test_autoscaling/utils.py @@ -0,0 +1,48 @@ +import boto +import boto3 +from boto import vpc as boto_vpc +from moto import mock_ec2, mock_ec2_deprecated + + +@mock_ec2 +def setup_networking(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.11.0.0/16') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='10.11.1.0/24', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='10.11.2.0/24', + AvailabilityZone='us-east-1b') + return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id} + +@mock_ec2_deprecated +def setup_networking_deprecated(): + conn = boto_vpc.connect_to_region('us-east-1') + vpc = conn.create_vpc("10.11.0.0/16") + subnet1 = conn.create_subnet( + vpc.id, + "10.11.1.0/24", + availability_zone='us-east-1a') + subnet2 = conn.create_subnet( + vpc.id, + "10.11.2.0/24", + availability_zone='us-east-1b') + return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id} + + +@mock_ec2 +def setup_instance_with_networking(image_id, instance_type): + mock_data = setup_networking() + ec2 = boto3.resource('ec2', region_name='us-east-1') + instances = ec2.create_instances( + ImageId=image_id, + InstanceType=instance_type, + MaxCount=1, + MinCount=1, + SubnetId=mock_data['subnet1'] + ) + mock_data['instance'] = instances[0].id + return mock_data diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 7bdfe3256..9ef6fdb0d 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -6,13 +6,16 @@ import boto3 import hashlib import io import json +import time import zipfile import sure # noqa from freezegun import freeze_time -from moto import mock_lambda, mock_s3, mock_ec2, settings +from moto import mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings +from nose.tools import assert_raises +from botocore.exceptions import ClientError -_lambda_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' +_lambda_region = 'us-west-2' def _process_lambda(func_str): @@ -48,6 +51,15 @@ def lambda_handler(event, context): return _process_lambda(func_str) +def get_test_zip_file3(): + pfunc = """ +def lambda_handler(event, context): + print("get_test_zip_file3 success") + return event +""" + return _process_lambda(pfunc) + + @mock_lambda def test_list_functions(): conn = boto3.client('lambda', 'us-west-2') @@ -160,6 +172,56 @@ if settings.TEST_SERVER_MODE: payload.should.equal(msg) +@mock_logs +@mock_sns +@mock_ec2 +@mock_lambda +def test_invoke_function_from_sns(): + logs_conn = boto3.client("logs", region_name="us-west-2") + sns_conn = boto3.client("sns", region_name="us-west-2") + sns_conn.create_topic(Name="some-topic") + topics_json = sns_conn.list_topics() + topics = topics_json["Topics"] + topic_arn = topics[0]['TopicArn'] + + conn = boto3.client('lambda', 'us-west-2') + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': get_test_zip_file3(), + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + sns_conn.subscribe(TopicArn=topic_arn, Protocol="lambda", Endpoint=result['FunctionArn']) + + result = sns_conn.publish(TopicArn=topic_arn, 
Message=json.dumps({})) + + start = time.time() + while (time.time() - start) < 30: + result = logs_conn.describe_log_streams(logGroupName='/aws/lambda/testFunction') + log_streams = result.get('logStreams') + if not log_streams: + time.sleep(1) + continue + + assert len(log_streams) == 1 + result = logs_conn.get_log_events(logGroupName='/aws/lambda/testFunction', logStreamName=log_streams[0]['logStreamName']) + for event in result.get('events'): + if event['message'] == 'get_test_zip_file3 success': + return + + time.sleep(1) + + assert False, "Test Failed" + + @mock_lambda def test_create_based_on_s3_with_missing_bucket(): conn = boto3.client('lambda', 'us-west-2') @@ -229,7 +291,7 @@ def test_create_function_from_aws_bucket(): 'Description': 'test lambda function', 'Timeout': 3, 'MemorySize': 128, - 'Version': '$LATEST', + 'Version': '1', 'VpcConfig': { "SecurityGroupIds": ["sg-123abc"], "SubnetIds": ["subnet-123abc"], @@ -274,7 +336,7 @@ def test_create_function_from_zipfile(): 'Timeout': 3, 'MemorySize': 128, 'CodeSha256': hashlib.sha256(zip_content).hexdigest(), - 'Version': '$LATEST', + 'Version': '1', 'VpcConfig': { "SecurityGroupIds": [], "SubnetIds": [], @@ -317,30 +379,32 @@ def test_get_function(): result['ResponseMetadata'].pop('RetryAttempts', None) result['Configuration'].pop('LastModified') - result.should.equal({ - "Code": { - "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(_lambda_region), - "RepositoryType": "S3" - }, - "Configuration": { - "CodeSha256": hashlib.sha256(zip_content).hexdigest(), - "CodeSize": len(zip_content), - "Description": "test lambda function", - "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), - "FunctionName": "testFunction", - "Handler": "lambda_function.lambda_handler", - "MemorySize": 128, - "Role": "test-iam-role", - "Runtime": "python2.7", - "Timeout": 3, - "Version": '$LATEST', - "VpcConfig": { - "SecurityGroupIds": [], - "SubnetIds": [], - } - }, - 'ResponseMetadata': {'HTTPStatusCode': 200}, - }) + result['Code']['Location'].should.equal('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip'.format(_lambda_region)) + result['Code']['RepositoryType'].should.equal('S3') + + result['Configuration']['CodeSha256'].should.equal(hashlib.sha256(zip_content).hexdigest()) + result['Configuration']['CodeSize'].should.equal(len(zip_content)) + result['Configuration']['Description'].should.equal('test lambda function') + result['Configuration'].should.contain('FunctionArn') + result['Configuration']['FunctionName'].should.equal('testFunction') + result['Configuration']['Handler'].should.equal('lambda_function.lambda_handler') + result['Configuration']['MemorySize'].should.equal(128) + result['Configuration']['Role'].should.equal('test-iam-role') + result['Configuration']['Runtime'].should.equal('python2.7') + result['Configuration']['Timeout'].should.equal(3) + result['Configuration']['Version'].should.equal('$LATEST') + result['Configuration'].should.contain('VpcConfig') + + # Test get function with + result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST') + result['Configuration']['Version'].should.equal('$LATEST') + result['Configuration']['FunctionArn'].should.equal('arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST') + + + # Test get function when can't find function name + with assert_raises(ClientError): + conn.get_function(FunctionName='junk', Qualifier='$LATEST') + @mock_lambda @@ -380,6 +444,52 @@ def test_delete_function(): 
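
The SNS-to-Lambda test above polls CloudWatch Logs because the mocked invocation runs asynchronously in a Docker container and flushes its output with a delay. That idiom generalizes to the helper sketched here; a minimal sketch, with the group name, marker, and timeout supplied by the caller.

import time

import boto3

def wait_for_log_message(log_group, marker, region='us-west-2', timeout=30):
    # Poll once per second until `marker` appears in any stream of
    # `log_group`, or give up after `timeout` seconds.
    logs = boto3.client('logs', region_name=region)
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            streams = logs.describe_log_streams(logGroupName=log_group)['logStreams']
        except logs.exceptions.ResourceNotFoundException:
            # The log group only appears after the first invocation completes.
            streams = []
        for stream in streams:
            events = logs.get_log_events(
                logGroupName=log_group,
                logStreamName=stream['logStreamName'])['events']
            if any(marker in event['message'] for event in events):
                return True
        time.sleep(1)
    return False
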
FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError) +@mock_lambda +@mock_s3 +def test_publish(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=False, + ) + + function_list = conn.list_functions() + function_list['Functions'].should.have.length_of(1) + latest_arn = function_list['Functions'][0]['FunctionArn'] + + res = conn.publish_version(FunctionName='testFunction') + assert res['ResponseMetadata']['HTTPStatusCode'] == 201 + + function_list = conn.list_functions() + function_list['Functions'].should.have.length_of(2) + + # #SetComprehension ;-) + published_arn = list({f['FunctionArn'] for f in function_list['Functions']} - {latest_arn})[0] + published_arn.should.contain('testFunction:1') + + conn.delete_function(FunctionName='testFunction', Qualifier='1') + + function_list = conn.list_functions() + function_list['Functions'].should.have.length_of(1) + function_list['Functions'][0]['FunctionArn'].should.contain('testFunction') + + @mock_lambda @mock_s3 @freeze_time('2015-01-01 00:00:00') @@ -593,7 +703,7 @@ def test_invoke_async_function(): ) success_result = conn.invoke_async( - FunctionName='testFunction', + FunctionName='testFunction', InvokeArgs=json.dumps({'test': 'event'}) ) @@ -633,7 +743,7 @@ def test_get_function_created_with_zipfile(): "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn":'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.handler", "MemorySize": 128, @@ -719,3 +829,107 @@ def get_function_policy(): assert isinstance(response['Policy'], str) res = json.loads(response['Policy']) assert res['Statement'][0]['Action'] == 'lambda:InvokeFunction' + + +@mock_lambda +@mock_s3 +def test_list_versions_by_function(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='arn:aws:iam::123456789012:role/test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + res = conn.publish_version(FunctionName='testFunction') + assert res['ResponseMetadata']['HTTPStatusCode'] == 201 + versions = conn.list_versions_by_function(FunctionName='testFunction') + assert len(versions['Versions']) == 3 + assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:$LATEST' + assert versions['Versions'][1]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:1' + assert 
versions['Versions'][2]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction:2' + + conn.create_function( + FunctionName='testFunction_2', + Runtime='python2.7', + Role='arn:aws:iam::123456789012:role/test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=False, + ) + versions = conn.list_versions_by_function(FunctionName='testFunction_2') + assert len(versions['Versions']) == 1 + assert versions['Versions'][0]['FunctionArn'] == 'arn:aws:lambda:us-west-2:123456789012:function:testFunction_2:$LATEST' + + +@mock_lambda +@mock_s3 +def test_create_function_with_already_exists(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + response = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + assert response['FunctionName'] == 'testFunction' + + +@mock_lambda +@mock_s3 +def test_list_versions_by_function_for_nonexistent_function(): + conn = boto3.client('lambda', 'us-west-2') + versions = conn.list_versions_by_function(FunctionName='testFunction') + + assert len(versions['Versions']) == 0 diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py index ec24cd911..310ac0b48 100644 --- a/tests/test_batch/test_batch.py +++ b/tests/test_batch/test_batch.py @@ -323,6 +323,54 @@ def test_create_job_queue(): resp.should.contain('jobQueues') len(resp['jobQueues']).should.equal(0) + # Create job queue which already exists + try: + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + + except ClientError as err: + err.response['Error']['Code'].should.equal('ClientException') + + + # Create job queue with incorrect state + try: + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue2', + state='JUNK', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + + except ClientError as err: + err.response['Error']['Code'].should.equal('ClientException') + + # Create job queue with no compute env + try: + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue3', + state='JUNK', + priority=123, + computeEnvironmentOrder=[ + + ] + ) + + except ClientError as err: + err.response['Error']['Code'].should.equal('ClientException') @mock_ec2 @mock_ecs @@ -397,6 +445,17 @@ def test_update_job_queue(): len(resp['jobQueues']).should.equal(1) resp['jobQueues'][0]['priority'].should.equal(5) + batch_client.update_job_queue( + jobQueue='test_job_queue', + priority=5 + ) + + resp = batch_client.describe_job_queues() + resp.should.contain('jobQueues') + 
len(resp['jobQueues']).should.equal(1) + resp['jobQueues'][0]['priority'].should.equal(5) + + @mock_ec2 @mock_ecs diff --git a/tests/test_cloudformation/fixtures/kms_key.py b/tests/test_cloudformation/fixtures/kms_key.py new file mode 100644 index 000000000..366dbfcf5 --- /dev/null +++ b/tests/test_cloudformation/fixtures/kms_key.py @@ -0,0 +1,39 @@ +from __future__ import unicode_literals + +template = { + "AWSTemplateFormatVersion": "2010-09-09", + + "Description": "AWS CloudFormation Sample Template to create a KMS Key. The Fn::GetAtt is used to retrieve the ARN", + + "Resources" : { + "myKey" : { + "Type" : "AWS::KMS::Key", + "Properties" : { + "Description": "Sample KmsKey", + "EnableKeyRotation": False, + "Enabled": True, + "KeyPolicy" : { + "Version": "2012-10-17", + "Id": "key-default-1", + "Statement": [ + { + "Sid": "Enable IAM User Permissions", + "Effect": "Allow", + "Principal": { + "AWS": { "Fn::Join" : ["" , ["arn:aws:iam::", {"Ref" : "AWS::AccountId"} ,":root" ]] } + }, + "Action": "kms:*", + "Resource": "*" + } + ] + } + } + } + }, + "Outputs" : { + "KeyArn" : { + "Description": "Generated Key Arn", + "Value" : { "Fn::GetAtt" : [ "myKey", "Arn" ] } + } + } +} \ No newline at end of file diff --git a/tests/test_cloudformation/fixtures/vpc_eni.py b/tests/test_cloudformation/fixtures/vpc_eni.py index ef9eb1d08..3f8eb2d03 100644 --- a/tests/test_cloudformation/fixtures/vpc_eni.py +++ b/tests/test_cloudformation/fixtures/vpc_eni.py @@ -29,6 +29,10 @@ template = { "NinjaENI": { "Description": "Elastic IP mapping to Auto-Scaling Group", "Value": {"Ref": "ENI"} + }, + "ENIIpAddress": { + "Description": "ENI's Private IP address", + "Value": {"Fn::GetAtt": ["ENI", "PrimaryPrivateIpAddress"]} } } } diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 801faf8a1..b7906632b 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -266,9 +266,9 @@ def test_delete_stack_by_name(): template_body=dummy_template_json, ) - conn.list_stacks().should.have.length_of(1) + conn.describe_stacks().should.have.length_of(1) conn.delete_stack("test_stack") - conn.list_stacks().should.have.length_of(0) + conn.describe_stacks().should.have.length_of(0) @mock_cloudformation_deprecated @@ -279,9 +279,9 @@ def test_delete_stack_by_id(): template_body=dummy_template_json, ) - conn.list_stacks().should.have.length_of(1) + conn.describe_stacks().should.have.length_of(1) conn.delete_stack(stack_id) - conn.list_stacks().should.have.length_of(0) + conn.describe_stacks().should.have.length_of(0) with assert_raises(BotoServerError): conn.describe_stacks("test_stack") @@ -296,9 +296,9 @@ def test_delete_stack_with_resource_missing_delete_attr(): template_body=dummy_template_json3, ) - conn.list_stacks().should.have.length_of(1) + conn.describe_stacks().should.have.length_of(1) conn.delete_stack("test_stack") - conn.list_stacks().should.have.length_of(0) + conn.describe_stacks().should.have.length_of(0) @mock_cloudformation_deprecated diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index ed2ee8337..d05bc1b53 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -1,18 +1,15 @@ from __future__ import unicode_literals -import boto3 -import boto -import 
boto.s3 -import boto.s3.key -from botocore.exceptions import ClientError -from moto import mock_cloudformation, mock_s3, mock_sqs - import json +from collections import OrderedDict + +import boto3 +from botocore.exceptions import ClientError import sure # noqa # Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa from nose.tools import assert_raises -import random + +from moto import mock_cloudformation, mock_s3, mock_sqs, mock_ec2 dummy_template = { "AWSTemplateFormatVersion": "2010-09-09", @@ -39,7 +36,6 @@ dummy_template = { } } - dummy_template_yaml = """--- AWSTemplateFormatVersion: 2010-09-09 Description: Stack1 with yaml template @@ -57,7 +53,6 @@ Resources: Value: Name tag for tests """ - dummy_template_yaml_with_short_form_func = """--- AWSTemplateFormatVersion: 2010-09-09 Description: Stack1 with yaml template @@ -75,7 +70,6 @@ Resources: Value: Name tag for tests """ - dummy_template_yaml_with_ref = """--- AWSTemplateFormatVersion: 2010-09-09 Description: Stack1 with yaml template @@ -100,7 +94,6 @@ Resources: Value: !Ref TagName """ - dummy_update_template = { "AWSTemplateFormatVersion": "2010-09-09", "Parameters": { @@ -131,12 +124,12 @@ dummy_output_template = { } } }, - "Outputs" : { - "StackVPC" : { - "Description" : "The ID of the VPC", - "Value" : "VPCID", - "Export" : { - "Name" : "My VPC ID" + "Outputs": { + "StackVPC": { + "Description": "The ID of the VPC", + "Value": "VPCID", + "Export": { + "Name": "My VPC ID" } } } @@ -155,10 +148,458 @@ dummy_import_template = { } } +dummy_redrive_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "MainQueue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "mainqueue.fifo", + "FifoQueue": True, + "ContentBasedDeduplication": False, + "RedrivePolicy": { + "deadLetterTargetArn": { + "Fn::GetAtt": [ + "DeadLetterQueue", + "Arn" + ] + }, + "maxReceiveCount": 5 + } + } + }, + "DeadLetterQueue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "FifoQueue": True + } + }, + } +} + dummy_template_json = json.dumps(dummy_template) -dummy_update_template_json = json.dumps(dummy_template) +dummy_update_template_json = json.dumps(dummy_update_template) dummy_output_template_json = json.dumps(dummy_output_template) dummy_import_template_json = json.dumps(dummy_import_template) +dummy_redrive_template_json = json.dumps(dummy_redrive_template) + + +@mock_cloudformation +def test_boto3_describe_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + usw2_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-2', + ) + use1_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-east-1', + ) + + usw2_instance['StackInstance'].should.have.key('Region').which.should.equal('us-west-2') + usw2_instance['StackInstance'].should.have.key('Account').which.should.equal('123456789012') + use1_instance['StackInstance'].should.have.key('Region').which.should.equal('us-east-1') + use1_instance['StackInstance'].should.have.key('Account').which.should.equal('123456789012') + + +@mock_cloudformation +def 
test_boto3_list_stacksets_length(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_set( + StackSetName="test_stack_set2", + TemplateBody=dummy_template_yaml, + ) + stacksets = cf_conn.list_stack_sets() + stacksets.should.have.length_of(2) + + +@mock_cloudformation +def test_boto3_list_stacksets_contents(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + stacksets = cf_conn.list_stack_sets() + stacksets['Summaries'][0].should.have.key('StackSetName').which.should.equal('test_stack_set') + stacksets['Summaries'][0].should.have.key('Status').which.should.equal('ACTIVE') + + +@mock_cloudformation +def test_boto3_stop_stack_set_operation(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + operation_id = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set")['Summaries'][-1]['OperationId'] + cf_conn.stop_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id + ) + list_operation = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set" + ) + list_operation['Summaries'][-1]['Status'].should.equal('STOPPED') + + +@mock_cloudformation +def test_boto3_describe_stack_set_operation(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + operation_id = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set")['Summaries'][-1]['OperationId'] + cf_conn.stop_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id + ) + response = cf_conn.describe_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id, + ) + + response['StackSetOperation']['Status'].should.equal('STOPPED') + response['StackSetOperation']['Action'].should.equal('CREATE') + + +@mock_cloudformation +def test_boto3_list_stack_set_operation_results(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + operation_id = cf_conn.list_stack_set_operations( + StackSetName="test_stack_set")['Summaries'][-1]['OperationId'] + + cf_conn.stop_stack_set_operation( + StackSetName="test_stack_set", + OperationId=operation_id + ) + response = cf_conn.list_stack_set_operation_results( + StackSetName="test_stack_set", + OperationId=operation_id, + ) + + response['Summaries'].should.have.length_of(3) + response['Summaries'][0].should.have.key('Account').which.should.equal('123456789012') + response['Summaries'][1].should.have.key('Status').which.should.equal('STOPPED') + + +@mock_cloudformation +def test_boto3_update_stack_instances(): + cf_conn = boto3.client('cloudformation', 
region_name='us-east-1') + param = [ + {'ParameterKey': 'SomeParam', 'ParameterValue': 'StackSetValue'}, + {'ParameterKey': 'AnotherParam', 'ParameterValue': 'StackSetValue2'}, + ] + param_overrides = [ + {'ParameterKey': 'SomeParam', 'ParameterValue': 'OverrideValue'}, + {'ParameterKey': 'AnotherParam', 'ParameterValue': 'OverrideValue2'} + ] + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-1', 'us-west-2'], + ) + cf_conn.update_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-west-1', 'us-west-2'], + ParameterOverrides=param_overrides, + ) + usw2_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-2', + ) + usw1_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-1', + ) + use1_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-east-1', + ) + + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + + usw1_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + usw1_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + usw1_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + usw1_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + + use1_instance['StackInstance']['ParameterOverrides'].should.be.empty + + +@mock_cloudformation +def test_boto3_delete_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + + cf_conn.delete_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1'], + RetainStacks=False, + ) + + cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'].should.have.length_of(1) + cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'][0]['Region'].should.equal( + 'us-west-2') + + +@mock_cloudformation +def test_boto3_create_stack_instances(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + + 
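
The stack-instance tests in this file all share one create/instantiate/mutate/inspect cycle; condensed, it looks like the sketch below (set, queue, and account names are illustrative, and the template is a stand-in, assuming moto's StackSet mock behaves as the surrounding tests show).

import json

import boto3
from moto import mock_cloudformation

SKETCH_TEMPLATE = json.dumps({
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "Queue": {
            "Type": "AWS::SQS::Queue",
            "Properties": {"QueueName": "sketch-queue"},
        }
    },
})

@mock_cloudformation
def sketch_stack_instance_lifecycle():
    cf = boto3.client('cloudformation', region_name='us-east-1')
    cf.create_stack_set(StackSetName='sketch_set', TemplateBody=SKETCH_TEMPLATE)
    cf.create_stack_instances(
        StackSetName='sketch_set',
        Accounts=['123456789012'],
        Regions=['us-east-1', 'us-west-2'],
    )
    # Dropping one region should leave a single instance behind.
    cf.delete_stack_instances(
        StackSetName='sketch_set',
        Accounts=['123456789012'],
        Regions=['us-east-1'],
        RetainStacks=False,
    )
    summaries = cf.list_stack_instances(StackSetName='sketch_set')['Summaries']
    assert [s['Region'] for s in summaries] == ['us-west-2']
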
cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'].should.have.length_of(2) + cf_conn.list_stack_instances(StackSetName="test_stack_set")['Summaries'][0]['Account'].should.equal( + '123456789012') + + +@mock_cloudformation +def test_boto3_create_stack_instances_with_param_overrides(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + param = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'StackSetValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'StackSetValue2'}, + ] + param_overrides = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'OverrideValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'OverrideValue2'} + ] + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ParameterOverrides=param_overrides, + ) + usw2_instance = cf_conn.describe_stack_instance( + StackSetName="test_stack_set", + StackInstanceAccount='123456789012', + StackInstanceRegion='us-west-2', + ) + + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + usw2_instance['StackInstance']['ParameterOverrides'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + usw2_instance['StackInstance']['ParameterOverrides'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + + +@mock_cloudformation +def test_update_stack_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + param = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'StackSetValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'StackSetValue2'}, + ] + param_overrides = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'OverrideValue'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'OverrideValue2'} + ] + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param, + ) + cf_conn.update_stack_set( + StackSetName='test_stack_set', + TemplateBody=dummy_template_yaml_with_ref, + Parameters=param_overrides, + ) + stackset = cf_conn.describe_stack_set(StackSetName='test_stack_set') + + stackset['StackSet']['Parameters'][0]['ParameterValue'].should.equal(param_overrides[0]['ParameterValue']) + stackset['StackSet']['Parameters'][1]['ParameterValue'].should.equal(param_overrides[1]['ParameterValue']) + stackset['StackSet']['Parameters'][0]['ParameterKey'].should.equal(param_overrides[0]['ParameterKey']) + stackset['StackSet']['Parameters'][1]['ParameterKey'].should.equal(param_overrides[1]['ParameterKey']) + + +@mock_cloudformation +def test_boto3_list_stack_set_operations(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.create_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + cf_conn.update_stack_instances( + StackSetName="test_stack_set", + Accounts=['123456789012'], + Regions=['us-east-1', 'us-west-2'], + ) + + list_operation = cf_conn.list_stack_set_operations(StackSetName="test_stack_set") + list_operation['Summaries'].should.have.length_of(2) + 
list_operation['Summaries'][-1]['Action'].should.equal('UPDATE') + + +@mock_cloudformation +def test_boto3_delete_stack_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + cf_conn.delete_stack_set(StackSetName='test_stack_set') + + cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['Status'].should.equal( + 'DELETED') + + +@mock_cloudformation +def test_boto3_create_stack_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_json, + ) + + cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['TemplateBody'].should.equal( + dummy_template_json) + + +@mock_cloudformation +def test_boto3_create_stack_set_with_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack_set( + StackSetName="test_stack_set", + TemplateBody=dummy_template_yaml, + ) + + cf_conn.describe_stack_set(StackSetName="test_stack_set")['StackSet']['TemplateBody'].should.equal( + dummy_template_yaml) + + +@mock_cloudformation +@mock_s3 +def test_create_stack_set_from_s3_url(): + s3 = boto3.client('s3') + s3_conn = boto3.resource('s3') + bucket = s3_conn.create_bucket(Bucket="foobar") + + key = s3_conn.Object( + 'foobar', 'template-key').put(Body=dummy_template_json) + key_url = s3.generate_presigned_url( + ClientMethod='get_object', + Params={ + 'Bucket': 'foobar', + 'Key': 'template-key' + } + ) + + cf_conn = boto3.client('cloudformation', region_name='us-west-1') + cf_conn.create_stack_set( + StackSetName='stack_from_url', + TemplateURL=key_url, + ) + cf_conn.describe_stack_set(StackSetName="stack_from_url")['StackSet']['TemplateBody'].should.equal( + dummy_template_json) + + +@mock_cloudformation +def test_boto3_create_stack_set_with_ref_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + params = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'}, + ] + cf_conn.create_stack_set( + StackSetName="test_stack", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=params + ) + + cf_conn.describe_stack_set(StackSetName="test_stack")['StackSet']['TemplateBody'].should.equal( + dummy_template_yaml_with_ref) + + +@mock_cloudformation +def test_boto3_describe_stack_set_params(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + params = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'}, + ] + cf_conn.create_stack_set( + StackSetName="test_stack", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=params + ) + + cf_conn.describe_stack_set(StackSetName="test_stack")['StackSet']['Parameters'].should.equal( + params) @mock_cloudformation @@ -170,7 +611,8 @@ def test_boto3_create_stack(): ) cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( - dummy_template) + json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) + @mock_cloudformation def test_boto3_create_stack_with_yaml(): @@ -278,9 +720,150 @@ def test_create_stack_from_s3_url(): StackName='stack_from_url', TemplateURL=key_url, ) + cf_conn.get_template(StackName="stack_from_url")['TemplateBody'].should.equal( + json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) - cf_conn.get_template(StackName="stack_from_url")[ - 
'TemplateBody'].should.equal(dummy_template) + +@mock_cloudformation +def test_update_stack_with_previous_value(): + name = 'update_stack_with_previous_value' + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName=name, TemplateBody=dummy_template_yaml_with_ref, + Parameters=[ + {'ParameterKey': 'TagName', 'ParameterValue': 'foo'}, + {'ParameterKey': 'TagDescription', 'ParameterValue': 'bar'}, + ] + ) + cf_conn.update_stack( + StackName=name, UsePreviousTemplate=True, + Parameters=[ + {'ParameterKey': 'TagName', 'UsePreviousValue': True}, + {'ParameterKey': 'TagDescription', 'ParameterValue': 'not bar'}, + ] + ) + stack = cf_conn.describe_stacks(StackName=name)['Stacks'][0] + tag_name = [x['ParameterValue'] for x in stack['Parameters'] + if x['ParameterKey'] == 'TagName'][0] + tag_desc = [x['ParameterValue'] for x in stack['Parameters'] + if x['ParameterKey'] == 'TagDescription'][0] + assert tag_name == 'foo' + assert tag_desc == 'not bar' + + +@mock_cloudformation +@mock_s3 +@mock_ec2 +def test_update_stack_from_s3_url(): + s3 = boto3.client('s3') + s3_conn = boto3.resource('s3') + + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName="update_stack_from_url", + TemplateBody=dummy_template_json, + Tags=[{'Key': 'foo', 'Value': 'bar'}], + ) + + s3_conn.create_bucket(Bucket="foobar") + + s3_conn.Object( + 'foobar', 'template-key').put(Body=dummy_update_template_json) + key_url = s3.generate_presigned_url( + ClientMethod='get_object', + Params={ + 'Bucket': 'foobar', + 'Key': 'template-key' + } + ) + + cf_conn.update_stack( + StackName="update_stack_from_url", + TemplateURL=key_url, + ) + + cf_conn.get_template(StackName="update_stack_from_url")[ 'TemplateBody'].should.equal( + json.loads(dummy_update_template_json, object_pairs_hook=OrderedDict)) + + +@mock_cloudformation +@mock_s3 +def test_create_change_set_from_s3_url(): + s3 = boto3.client('s3') + s3_conn = boto3.resource('s3') + bucket = s3_conn.create_bucket(Bucket="foobar") + + key = s3_conn.Object( + 'foobar', 'template-key').put(Body=dummy_template_json) + key_url = s3.generate_presigned_url( + ClientMethod='get_object', + Params={ + 'Bucket': 'foobar', + 'Key': 'template-key' + } + ) + cf_conn = boto3.client('cloudformation', region_name='us-west-1') + response = cf_conn.create_change_set( + StackName='NewStack', + TemplateURL=key_url, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + Tags=[ + {'Key': 'tag-key', 'Value': 'tag-value'} + ], + ) + assert 'arn:aws:cloudformation:us-west-1:123456789:changeSet/NewChangeSet/' in response['Id'] + assert 'arn:aws:cloudformation:us-east-1:123456789:stack/NewStack' in response['StackId'] + + +@mock_cloudformation +def test_describe_change_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + ) + + stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet") + stack['ChangeSetName'].should.equal('NewChangeSet') + stack['StackName'].should.equal('NewStack') + + cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_update_template_json, + ChangeSetName='NewChangeSet2', + ChangeSetType='UPDATE', + ) + stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet2") + stack['ChangeSetName'].should.equal('NewChangeSet2') + stack['StackName'].should.equal('NewStack') + 
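
The change-set tests around this point pair create_change_set with describe and execute; the round trip condenses to this sketch (stack, change-set, and queue names are illustrative, and nothing is provisioned until the change set is executed).

import json

import boto3
from moto import mock_cloudformation

SKETCH_TEMPLATE = json.dumps({
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources": {
        "Queue": {
            "Type": "AWS::SQS::Queue",
            "Properties": {"QueueName": "sketch-queue"},
        }
    },
})

@mock_cloudformation
def sketch_change_set_round_trip():
    cf = boto3.client('cloudformation', region_name='us-east-1')
    cf.create_change_set(
        StackName='SketchStack',
        TemplateBody=SKETCH_TEMPLATE,
        ChangeSetName='SketchChangeSet',
        ChangeSetType='CREATE',
    )
    # The pending change set can be inspected before execution.
    described = cf.describe_change_set(ChangeSetName='SketchChangeSet')
    assert described['StackName'] == 'SketchStack'
    # Executing it is what actually creates the stack.
    cf.execute_change_set(ChangeSetName='SketchChangeSet', StackName='SketchStack')
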
stack['Changes'].should.have.length_of(2) + + +@mock_cloudformation +def test_execute_change_set_w_arn(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + change_set = cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + ) + cf_conn.execute_change_set(ChangeSetName=change_set['Id']) + + +@mock_cloudformation +def test_execute_change_set_w_name(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + change_set = cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + ) + cf_conn.execute_change_set(ChangeSetName='NewChangeSet', StackName='NewStack') @mock_cloudformation @@ -349,6 +932,20 @@ def test_describe_stack_by_stack_id(): stack_by_id['StackName'].should.equal("test_stack") +@mock_cloudformation +def test_list_change_sets(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_change_set( + StackName='NewStack2', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet2', + ChangeSetType='CREATE', + ) + change_set = cf_conn.list_change_sets(StackName='NewStack2')['Summaries'][0] + change_set['StackName'].should.equal('NewStack2') + change_set['ChangeSetName'].should.equal('NewChangeSet2') + + @mock_cloudformation def test_list_stacks(): cf = boto3.resource('cloudformation', region_name='us-east-1') @@ -382,6 +979,23 @@ def test_delete_stack_from_resource(): @mock_cloudformation +@mock_ec2 +def test_delete_change_set(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + ) + + cf_conn.list_change_sets(StackName='NewStack')['Summaries'].should.have.length_of(1) + cf_conn.delete_change_set(ChangeSetName='NewChangeSet', StackName='NewStack') + cf_conn.list_change_sets(StackName='NewStack')['Summaries'].should.have.length_of(0) + + +@mock_cloudformation +@mock_ec2 def test_delete_stack_by_name(): cf_conn = boto3.client('cloudformation', region_name='us-east-1') cf_conn.create_stack( @@ -394,6 +1008,21 @@ def test_delete_stack_by_name(): cf_conn.describe_stacks()['Stacks'].should.have.length_of(0) +@mock_cloudformation +def test_delete_stack(): + cf = boto3.client('cloudformation', region_name='us-east-1') + cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + ) + + cf.delete_stack( + StackName="test_stack", + ) + stacks = cf.list_stacks() + assert stacks['StackSummaries'][0]['StackStatus'] == 'DELETE_COMPLETE' + + @mock_cloudformation def test_describe_deleted_stack(): cf_conn = boto3.client('cloudformation', region_name='us-east-1') @@ -412,6 +1041,7 @@ def test_describe_deleted_stack(): @mock_cloudformation +@mock_ec2 def test_describe_updated_stack(): cf_conn = boto3.client('cloudformation', region_name='us-east-1') cf_conn.create_stack( @@ -502,6 +1132,7 @@ def test_stack_tags(): @mock_cloudformation +@mock_ec2 def test_stack_events(): cf = boto3.resource('cloudformation', region_name='us-east-1') stack = cf.create_stack( @@ -617,6 +1248,7 @@ def test_export_names_must_be_unique(): TemplateBody=dummy_output_template_json, ) + @mock_sqs @mock_cloudformation def test_stack_with_imports(): @@ -636,3 +1268,19 @@ def test_stack_with_imports(): output = output_stack.outputs[0]['OutputValue'] queue = 
ec2_resource.get_queue_by_name(QueueName=output) queue.should_not.be.none + + +@mock_sqs +@mock_cloudformation +def test_non_json_redrive_policy(): + cf = boto3.resource('cloudformation', region_name='us-east-1') + + stack = cf.create_stack( + StackName="test_stack1", + TemplateBody=dummy_redrive_template_json + ) + + stack.Resource('MainQueue').resource_status\ + .should.equal("CREATE_COMPLETE") + stack.Resource('DeadLetterQueue').resource_status\ + .should.equal("CREATE_COMPLETE") diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 711d9153f..42ddd2351 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import json import base64 +from decimal import Decimal + import boto import boto.cloudformation import boto.datapipeline @@ -22,6 +24,7 @@ from moto import ( mock_cloudformation, mock_cloudformation_deprecated, mock_datapipeline_deprecated, + mock_dynamodb2, mock_ec2, mock_ec2_deprecated, mock_elb, @@ -39,6 +42,7 @@ from moto import ( mock_sqs, mock_sqs_deprecated, mock_elbv2) +from moto.dynamodb2.models import Table from .fixtures import ( ec2_classic_eip, @@ -752,6 +756,9 @@ def test_vpc_single_instance_in_subnet(): security_group.vpc_id.should.equal(vpc.id) stack = conn.describe_stacks()[0] + + vpc.tags.should.have.key('Application').which.should.equal(stack.stack_id) + resources = stack.describe_resources() vpc_resource = [ resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0] @@ -891,19 +898,25 @@ def test_iam_roles(): "my-launch-config": { "Properties": { - "IamInstanceProfile": {"Ref": "my-instance-profile"}, + "IamInstanceProfile": {"Ref": "my-instance-profile-with-path"}, "ImageId": "ami-1234abcd", }, "Type": "AWS::AutoScaling::LaunchConfiguration" }, - "my-instance-profile": { + "my-instance-profile-with-path": { "Properties": { "Path": "my-path", - "Roles": [{"Ref": "my-role"}], + "Roles": [{"Ref": "my-role-with-path"}], }, "Type": "AWS::IAM::InstanceProfile" }, - "my-role": { + "my-instance-profile-no-path": { + "Properties": { + "Roles": [{"Ref": "my-role-no-path"}], + }, + "Type": "AWS::IAM::InstanceProfile" + }, + "my-role-with-path": { "Properties": { "AssumeRolePolicyDocument": { "Statement": [ @@ -961,6 +974,26 @@ def test_iam_roles(): ] }, "Type": "AWS::IAM::Role" + }, + "my-role-no-path": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + } + } + ] + }, + }, + "Type": "AWS::IAM::Role" } } } @@ -974,37 +1007,51 @@ def test_iam_roles(): iam_conn = boto.iam.connect_to_region("us-west-1") - role_result = iam_conn.list_roles()['list_roles_response'][ - 'list_roles_result']['roles'][0] - role = iam_conn.get_role(role_result.role_name) - role.role_name.should.contain("my-role") - role.path.should.equal("my-path") + role_results = iam_conn.list_roles()['list_roles_response'][ + 'list_roles_result']['roles'] + role_name_to_id = {} + for role_result in role_results: + role = iam_conn.get_role(role_result.role_name) + role.role_name.should.contain("my-role") + if 'with-path' in role.role_name: + role_name_to_id['with-path'] = role.role_id + role.path.should.equal("my-path") + else: + role_name_to_id['no-path'] = role.role_id + 
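
The no-path role and instance profile in the template above pin down IAM's '/' default path under CloudFormation. The same default applies when the resources are created directly; a small sketch, with illustrative names, assuming moto mirrors the IAM default:

import boto3
from moto import mock_iam

ASSUME_ROLE = ('{"Statement": [{"Action": "sts:AssumeRole", "Effect": "Allow",'
               ' "Principal": {"Service": "ec2.amazonaws.com"}}]}')

@mock_iam
def sketch_default_iam_path():
    iam = boto3.client('iam', region_name='us-east-1')
    role = iam.create_role(RoleName='sketch-role',
                           AssumeRolePolicyDocument=ASSUME_ROLE)['Role']
    profile = iam.create_instance_profile(
        InstanceProfileName='sketch-profile')['InstanceProfile']
    # With no explicit Path, both resources default to '/'.
    assert role['Path'] == '/'
    assert profile['Path'] == '/'
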
role.role_name.should.contain('no-path') + role.path.should.equal('/') - instance_profile_response = iam_conn.list_instance_profiles()[ - 'list_instance_profiles_response'] - cfn_instance_profile = instance_profile_response[ - 'list_instance_profiles_result']['instance_profiles'][0] - instance_profile = iam_conn.get_instance_profile( - cfn_instance_profile.instance_profile_name) - instance_profile.instance_profile_name.should.contain( - "my-instance-profile") - instance_profile.path.should.equal("my-path") - instance_profile.role_id.should.equal(role.role_id) + instance_profile_responses = iam_conn.list_instance_profiles()[ + 'list_instance_profiles_response']['list_instance_profiles_result']['instance_profiles'] + instance_profile_responses.should.have.length_of(2) + instance_profile_names = [] + + for instance_profile_response in instance_profile_responses: + instance_profile = iam_conn.get_instance_profile(instance_profile_response.instance_profile_name) + instance_profile_names.append(instance_profile.instance_profile_name) + instance_profile.instance_profile_name.should.contain( + "my-instance-profile") + if "with-path" in instance_profile.instance_profile_name: + instance_profile.path.should.equal("my-path") + instance_profile.role_id.should.equal(role_name_to_id['with-path']) + else: + instance_profile.instance_profile_name.should.contain('no-path') + instance_profile.role_id.should.equal(role_name_to_id['no-path']) + instance_profile.path.should.equal('/') autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") launch_config = autoscale_conn.get_all_launch_configurations()[0] - launch_config.instance_profile_name.should.contain("my-instance-profile") + launch_config.instance_profile_name.should.contain("my-instance-profile-with-path") stack = conn.describe_stacks()[0] resources = stack.describe_resources() - instance_profile_resource = [ - resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'][0] - instance_profile_resource.physical_resource_id.should.equal( - instance_profile.instance_profile_name) + instance_profile_resources = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'] + {ip.physical_resource_id for ip in instance_profile_resources}.should.equal(set(instance_profile_names)) - role_resource = [ - resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'][0] - role_resource.physical_resource_id.should.equal(role.role_id) + role_resources = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'] + {r.physical_resource_id for r in role_resources}.should.equal(set(role_name_to_id.values())) @mock_ec2_deprecated() @@ -1909,7 +1956,12 @@ def lambda_handler(event, context): "Description": "Test function", "MemorySize": 128, "Role": "test-role", - "Runtime": "python2.7" + "Runtime": "python2.7", + "Environment": { + "Variables": { + "TEST_ENV_KEY": "test-env-val", + } + }, } } } @@ -1930,6 +1982,9 @@ def lambda_handler(event, context): result['Functions'][0]['MemorySize'].should.equal(128) result['Functions'][0]['Role'].should.equal('test-role') result['Functions'][0]['Runtime'].should.equal('python2.7') + result['Functions'][0]['Environment'].should.equal({ + "Variables": {"TEST_ENV_KEY": "test-env-val"} + }) @mock_cloudformation @@ -2034,7 +2089,7 @@ def test_stack_kms(): def test_stack_spot_fleet(): conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = 
conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] @@ -2113,6 +2168,78 @@ def test_stack_spot_fleet(): launch_spec['WeightedCapacity'].should.equal(2.0) +@mock_cloudformation() +@mock_ec2() +def test_stack_spot_fleet_should_figure_out_default_price(): + conn = boto3.client('ec2', 'us-east-1') + + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet_id = subnet['SubnetId'] + + spot_fleet_template = { + 'Resources': { + "SpotFleet1": { + "Type": "AWS::EC2::SpotFleet", + "Properties": { + "SpotFleetRequestConfigData": { + "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", + "TargetCapacity": 6, + "AllocationStrategy": "diversified", + "LaunchSpecifications": [ + { + "EbsOptimized": "false", + "InstanceType": 't2.small', + "ImageId": "ami-1234", + "SubnetId": subnet_id, + "WeightedCapacity": "2", + }, + { + "EbsOptimized": "true", + "InstanceType": 't2.large', + "ImageId": "ami-1234", + "Monitoring": {"Enabled": "true"}, + "SecurityGroups": [{"GroupId": "sg-123"}], + "SubnetId": subnet_id, + "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, + "WeightedCapacity": "4", + } + ] + } + } + } + } + } + spot_fleet_template_json = json.dumps(spot_fleet_template) + + cf_conn = boto3.client('cloudformation', 'us-east-1') + stack_id = cf_conn.create_stack( + StackName='test_stack', + TemplateBody=spot_fleet_template_json, + )['StackId'] + + stack_resources = cf_conn.list_stack_resources(StackName=stack_id) + stack_resources['StackResourceSummaries'].should.have.length_of(1) + spot_fleet_id = stack_resources[ + 'StackResourceSummaries'][0]['PhysicalResourceId'] + + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(1) + spot_fleet_request = spot_fleet_requests[0] + spot_fleet_request['SpotFleetRequestState'].should.equal("active") + spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] + + assert 'SpotPrice' not in spot_fleet_config + len(spot_fleet_config['LaunchSpecifications']).should.equal(2) + launch_spec1 = spot_fleet_config['LaunchSpecifications'][0] + launch_spec2 = spot_fleet_config['LaunchSpecifications'][1] + + assert 'SpotPrice' not in launch_spec1 + assert 'SpotPrice' not in launch_spec2 + + @mock_ec2 @mock_elbv2 @mock_cloudformation @@ -2128,6 +2255,10 @@ def test_stack_elbv2_resources_integration(): "Description": "Load balancer name", "Value": {"Fn::GetAtt": ["alb", "LoadBalancerName"]}, }, + "canonicalhostedzoneid": { + "Description": "Load balancer canonical hosted zone ID", + "Value": {"Fn::GetAtt": ["alb", "CanonicalHostedZoneID"]}, + }, }, "Resources": { "alb": { @@ -2145,7 +2276,7 @@ def test_stack_elbv2_resources_integration(): "IpAddressType": "ipv4", } }, - "mytargetgroup": { + "mytargetgroup1": { "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", "Properties": { "HealthCheckIntervalSeconds": 30, @@ -2158,7 +2289,7 @@ def test_stack_elbv2_resources_integration(): "Matcher": { "HttpCode": "200,201" }, - "Name": "mytargetgroup", + "Name": "mytargetgroup1", "Port": 80, "Protocol": "HTTP", "TargetType": "instance", @@ -2173,12 +2304,37 @@ def test_stack_elbv2_resources_integration(): } } }, + "mytargetgroup2": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + 
"Properties": { + "HealthCheckIntervalSeconds": 30, + "HealthCheckPath": "/status", + "HealthCheckPort": 8080, + "HealthCheckProtocol": "HTTP", + "HealthCheckTimeoutSeconds": 5, + "HealthyThresholdCount": 30, + "UnhealthyThresholdCount": 5, + "Name": "mytargetgroup2", + "Port": 8080, + "Protocol": "HTTP", + "TargetType": "instance", + "Targets": [{ + "Id": { + "Ref": "ec2instance", + "Port": 8080, + }, + }], + "VpcId": { + "Ref": "myvpc", + } + } + }, "listener": { "Type": "AWS::ElasticLoadBalancingV2::Listener", "Properties": { "DefaultActions": [{ "Type": "forward", - "TargetGroupArn": {"Ref": "mytargetgroup"} + "TargetGroupArn": {"Ref": "mytargetgroup1"} }], "LoadBalancerArn": {"Ref": "alb"}, "Port": "80", @@ -2232,8 +2388,10 @@ def test_stack_elbv2_resources_integration(): load_balancers[0]['Type'].should.equal('application') load_balancers[0]['IpAddressType'].should.equal('ipv4') - target_groups = elbv2_conn.describe_target_groups()['TargetGroups'] - len(target_groups).should.equal(1) + target_groups = sorted( + elbv2_conn.describe_target_groups()['TargetGroups'], + key=lambda tg: tg['TargetGroupName']) # sort to do comparison with indexes + len(target_groups).should.equal(2) target_groups[0]['HealthCheckIntervalSeconds'].should.equal(30) target_groups[0]['HealthCheckPath'].should.equal('/status') target_groups[0]['HealthCheckPort'].should.equal('80') @@ -2242,11 +2400,24 @@ def test_stack_elbv2_resources_integration(): target_groups[0]['HealthyThresholdCount'].should.equal(30) target_groups[0]['UnhealthyThresholdCount'].should.equal(5) target_groups[0]['Matcher'].should.equal({'HttpCode': '200,201'}) - target_groups[0]['TargetGroupName'].should.equal('mytargetgroup') + target_groups[0]['TargetGroupName'].should.equal('mytargetgroup1') target_groups[0]['Port'].should.equal(80) target_groups[0]['Protocol'].should.equal('HTTP') target_groups[0]['TargetType'].should.equal('instance') + target_groups[1]['HealthCheckIntervalSeconds'].should.equal(30) + target_groups[1]['HealthCheckPath'].should.equal('/status') + target_groups[1]['HealthCheckPort'].should.equal('8080') + target_groups[1]['HealthCheckProtocol'].should.equal('HTTP') + target_groups[1]['HealthCheckTimeoutSeconds'].should.equal(5) + target_groups[1]['HealthyThresholdCount'].should.equal(30) + target_groups[1]['UnhealthyThresholdCount'].should.equal(5) + target_groups[1]['Matcher'].should.equal({'HttpCode': '200'}) + target_groups[1]['TargetGroupName'].should.equal('mytargetgroup2') + target_groups[1]['Port'].should.equal(8080) + target_groups[1]['Protocol'].should.equal('HTTP') + target_groups[1]['TargetType'].should.equal('instance') + listeners = elbv2_conn.describe_listeners(LoadBalancerArn=load_balancers[0]['LoadBalancerArn'])['Listeners'] len(listeners).should.equal(1) listeners[0]['LoadBalancerArn'].should.equal(load_balancers[0]['LoadBalancerArn']) @@ -2266,3 +2437,131 @@ def test_stack_elbv2_resources_integration(): dns['OutputValue'].should.equal(load_balancers[0]['DNSName']) name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName']) + + +@mock_dynamodb2 +@mock_cloudformation +def test_stack_dynamodb_resources_integration(): + dynamodb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "myDynamoDBTable": { + "Type": "AWS::DynamoDB::Table", + "Properties": { + "AttributeDefinitions": [ + { + "AttributeName": "Album", + "AttributeType": "S" + }, + { + "AttributeName": "Artist", + "AttributeType": "S" + }, + { + "AttributeName": "Sales", + "AttributeType": "N" + }, + { + 
"AttributeName": "NumberOfSongs", + "AttributeType": "N" + } + ], + "KeySchema": [ + { + "AttributeName": "Album", + "KeyType": "HASH" + }, + { + "AttributeName": "Artist", + "KeyType": "RANGE" + } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + }, + "TableName": "myTableName", + "GlobalSecondaryIndexes": [{ + "IndexName": "myGSI", + "KeySchema": [ + { + "AttributeName": "Sales", + "KeyType": "HASH" + }, + { + "AttributeName": "Artist", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Album","NumberOfSongs"], + "ProjectionType": "INCLUDE" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + } + }, + { + "IndexName": "myGSI2", + "KeySchema": [ + { + "AttributeName": "NumberOfSongs", + "KeyType": "HASH" + }, + { + "AttributeName": "Sales", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Album","Artist"], + "ProjectionType": "INCLUDE" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": "5", + "WriteCapacityUnits": "5" + } + }], + "LocalSecondaryIndexes":[{ + "IndexName": "myLSI", + "KeySchema": [ + { + "AttributeName": "Album", + "KeyType": "HASH" + }, + { + "AttributeName": "Sales", + "KeyType": "RANGE" + } + ], + "Projection": { + "NonKeyAttributes": ["Artist","NumberOfSongs"], + "ProjectionType": "INCLUDE" + } + }] + } + } + } + } + + dynamodb_template_json = json.dumps(dynamodb_template) + + cfn_conn = boto3.client('cloudformation', 'us-east-1') + cfn_conn.create_stack( + StackName='dynamodb_stack', + TemplateBody=dynamodb_template_json, + ) + + dynamodb_conn = boto3.resource('dynamodb', region_name='us-east-1') + table = dynamodb_conn.Table('myTableName') + table.name.should.equal('myTableName') + + table.put_item(Item={"Album": "myAlbum", "Artist": "myArtist", "Sales": 10, "NumberOfSongs": 5}) + + response = table.get_item(Key={"Album": "myAlbum", "Artist": "myArtist"}) + + response['Item']['Album'].should.equal('myAlbum') + response['Item']['Sales'].should.equal(Decimal('10')) + response['Item']['NumberOfSongs'].should.equal(Decimal('5')) + response['Item']['Album'].should.equal('myAlbum') diff --git a/tests/test_cloudformation/test_import_value.py b/tests/test_cloudformation/test_import_value.py new file mode 100644 index 000000000..04c2b5801 --- /dev/null +++ b/tests/test_cloudformation/test_import_value.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function, unicode_literals + +# Standard library modules +import unittest + +# Third-party modules +import boto3 +from botocore.exceptions import ClientError + +# Package modules +from moto import mock_cloudformation + +AWS_REGION = 'us-west-1' + +SG_STACK_NAME = 'simple-sg-stack' +SG_TEMPLATE = """ +AWSTemplateFormatVersion: 2010-09-09 +Description: Simple test CF template for moto_cloudformation + + +Resources: + SimpleSecurityGroup: + Type: AWS::EC2::SecurityGroup + Description: "A simple security group" + Properties: + GroupName: simple-security-group + GroupDescription: "A simple security group" + SecurityGroupEgress: + - + Description: "Egress to remote HTTPS servers" + CidrIp: 0.0.0.0/0 + IpProtocol: tcp + FromPort: 443 + ToPort: 443 + +Outputs: + SimpleSecurityGroupName: + Value: !GetAtt SimpleSecurityGroup.GroupId + Export: + Name: "SimpleSecurityGroup" + +""" + +EC2_STACK_NAME = 'simple-ec2-stack' +EC2_TEMPLATE = """ +--- +# The latest template format version is "2010-09-09" and as of 2018-04-09 +# is currently the only valid value. 
+AWSTemplateFormatVersion: 2010-09-09
+Description: Simple test CF template for moto_cloudformation
+
+
+Resources:
+  SimpleInstance:
+    Type: AWS::EC2::Instance
+    Properties:
+      ImageId: ami-03cf127a
+      InstanceType: t2.micro
+      SecurityGroups: !Split [',', !ImportValue SimpleSecurityGroup]
+"""
+
+
+class TestSimpleInstance(unittest.TestCase):
+    def test_simple_instance(self):
+        """Test that we can create a simple CloudFormation stack that imports values from an existing CloudFormation stack"""
+        with mock_cloudformation():
+            client = boto3.client('cloudformation', region_name=AWS_REGION)
+            client.create_stack(StackName=SG_STACK_NAME, TemplateBody=SG_TEMPLATE)
+            response = client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE)
+            self.assertIn('StackId', response)
+            response = client.describe_stacks(StackName=response['StackId'])
+            self.assertIn('Stacks', response)
+            stack_info = response['Stacks']
+            self.assertEqual(1, len(stack_info))
+            self.assertIn('StackName', stack_info[0])
+            self.assertEqual(EC2_STACK_NAME, stack_info[0]['StackName'])
+
+    def test_simple_instance_missing_export(self):
+        """Test that we get an exception if a CloudFormation stack tries to import a non-existent export value"""
+        with mock_cloudformation():
+            client = boto3.client('cloudformation', region_name=AWS_REGION)
+            with self.assertRaises(ClientError) as e:
+                client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE)
+            self.assertIn('Error', e.exception.response)
+            self.assertIn('Code', e.exception.response['Error'])
+            self.assertEqual('ExportNotFound', e.exception.response['Error']['Code'])
diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py
index d9fe4d80d..25242e352 100644
--- a/tests/test_cloudformation/test_stack_parsing.py
+++ b/tests/test_cloudformation/test_stack_parsing.py
@@ -75,6 +75,26 @@ get_attribute_output = {
     }
 }
 
+get_availability_zones_output = {
+    "Outputs": {
+        "Output1": {
+            "Value": {"Fn::GetAZs": ""}
+        }
+    }
+}
+
+parameters = {
+    "Parameters": {
+        "Param": {
+            "Type": "String",
+        },
+        "NoEchoParam": {
+            "Type": "String",
+            "NoEcho": True
+        }
+    }
+}
+
 split_select_template = {
     "AWSTemplateFormatVersion": "2010-09-09",
     "Resources": {
@@ -146,6 +166,11 @@ bad_outputs_template = dict(
     list(dummy_template.items()) + list(bad_output.items()))
 get_attribute_outputs_template = dict(
     list(dummy_template.items()) + list(get_attribute_output.items()))
+get_availability_zones_template = dict(
+    list(dummy_template.items()) + list(get_availability_zones_output.items()))
+
+parameters_template = dict(
+    list(dummy_template.items()) + list(parameters.items()))
 
 dummy_template_json = json.dumps(dummy_template)
 name_type_template_json = json.dumps(name_type_template)
@@ -153,6 +178,9 @@ output_type_template_json = json.dumps(outputs_template)
 bad_output_template_json = json.dumps(bad_outputs_template)
 get_attribute_outputs_template_json = json.dumps(
     get_attribute_outputs_template)
+get_availability_zones_template_json = json.dumps(
+    get_availability_zones_template)
+parameters_template_json = json.dumps(parameters_template)
 split_select_template_json = json.dumps(split_select_template)
 sub_template_json = json.dumps(sub_template)
 export_value_template_json = json.dumps(export_value_template)
@@ -242,12 +270,54 @@ def test_parse_stack_with_get_attribute_outputs():
     output.should.be.a(Output)
     output.value.should.equal("my-queue")
 
+def test_parse_stack_with_get_attribute_kms():
+    from .fixtures.kms_key import template
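+    # Assumption (clarifying comment, not in the original change): the kms_key fixture
+    # defines a KMS key whose single stack output is the key ARN, which is what the
+    # 'KeyArn' assertion below relies on.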
+    template_json = json.dumps(template)
+    stack = FakeStack(
+        stack_id="test_id",
+        name="test_stack",
+        template=template_json,
+        parameters={},
+        region_name='us-west-1')
+
+    stack.output_map.should.have.length_of(1)
+    list(stack.output_map.keys())[0].should.equal('KeyArn')
+    output = list(stack.output_map.values())[0]
+    output.should.be.a(Output)
+
+def test_parse_stack_with_get_availability_zones():
+    stack = FakeStack(
+        stack_id="test_id",
+        name="test_stack",
+        template=get_availability_zones_template_json,
+        parameters={},
+        region_name='us-east-1')
+
+    stack.output_map.should.have.length_of(1)
+    list(stack.output_map.keys())[0].should.equal('Output1')
+    output = list(stack.output_map.values())[0]
+    output.should.be.a(Output)
+    output.value.should.equal(["us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"])
+
 def test_parse_stack_with_bad_get_attribute_outputs():
     FakeStack.when.called_with(
         "test_id", "test_stack",
         bad_output_template_json, {}, "us-west-1").should.throw(ValidationError)
 
+def test_parse_stack_with_parameters():
+    stack = FakeStack(
+        stack_id="test_id",
+        name="test_stack",
+        template=parameters_template_json,
+        parameters={"Param": "visible value", "NoEchoParam": "hidden value"},
+        region_name='us-west-1')
+
+    stack.resource_map.no_echo_parameter_keys.should.have("NoEchoParam")
+    stack.resource_map.no_echo_parameter_keys.should_not.have("Param")
+
+
 def test_parse_equals_condition():
     parse_condition(
         condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]},
@@ -406,8 +476,8 @@ def test_short_form_func_in_yaml_teamplate():
       KeySplit: !Split [A, B]
       KeySub: !Sub A
     """
-    yaml.add_multi_constructor('', yaml_tag_constructor)
-    template_dict = yaml.load(template)
+    yaml.add_multi_constructor('', yaml_tag_constructor, Loader=yaml.Loader)
+    template_dict = yaml.load(template, Loader=yaml.Loader)
     key_and_expects = [
         ['KeyRef', {'Ref': 'foo'}],
         ['KeyB64', {'Fn::Base64': 'valueToEncode'}],
diff --git a/tests/test_cloudformation/test_validate.py b/tests/test_cloudformation/test_validate.py
new file mode 100644
index 000000000..e2c3af05d
--- /dev/null
+++ b/tests/test_cloudformation/test_validate.py
@@ -0,0 +1,115 @@
+import json
+
+import boto3
+import botocore
+
+from moto import mock_cloudformation
+
+json_template = {
+    "AWSTemplateFormatVersion": "2010-09-09",
+    "Description": "Stack 1",
+    "Resources": {
+        "EC2Instance1": {
+            "Type": "AWS::EC2::Instance",
+            "Properties": {
+                "ImageId": "ami-d3adb33f",
+                "KeyName": "dummy",
+                "InstanceType": "t2.micro",
+                "Tags": [
+                    {
+                        "Key": "Description",
+                        "Value": "Test tag"
+                    },
+                    {
+                        "Key": "Name",
+                        "Value": "Name tag for tests"
+                    }
+                ]
+            }
+        }
+    }
+}
+
+# One resource is required
+json_bad_template = {
+    "AWSTemplateFormatVersion": "2010-09-09",
+    "Description": "Stack 1"
+}
+
+dummy_template_json = json.dumps(json_template)
+dummy_bad_template_json = json.dumps(json_bad_template)
+
+
+@mock_cloudformation
+def test_boto3_json_validate_successful():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    response = 
cf_conn.validate_template( + TemplateBody=dummy_template_json, + ) + assert response['Description'] == "Stack 1" + assert response['Parameters'] == [] + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + +@mock_cloudformation +def test_boto3_json_invalid_missing_resource(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + try: + cf_conn.validate_template( + TemplateBody=dummy_bad_template_json, + ) + assert False + except botocore.exceptions.ClientError as e: + assert str(e) == 'An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack' \ + ' with id Missing top level item Resources to file module does not exist' + assert True + + +yaml_template = """ + AWSTemplateFormatVersion: '2010-09-09' + Description: Simple CloudFormation Test Template + Resources: + S3Bucket: + Type: AWS::S3::Bucket + Properties: + AccessControl: PublicRead + BucketName: cf-test-bucket-1 +""" + +yaml_bad_template = """ + AWSTemplateFormatVersion: '2010-09-09' + Description: Simple CloudFormation Test Template +""" + +@mock_cloudformation +def test_boto3_yaml_validate_successful(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + response = cf_conn.validate_template( + TemplateBody=yaml_template, + ) + assert response['Description'] == "Simple CloudFormation Test Template" + assert response['Parameters'] == [] + assert response['ResponseMetadata']['HTTPStatusCode'] == 200 + +@mock_cloudformation +def test_boto3_yaml_invalid_missing_resource(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + try: + cf_conn.validate_template( + TemplateBody=yaml_bad_template, + ) + assert False + except botocore.exceptions.ClientError as e: + assert str(e) == 'An error occurred (ValidationError) when calling the ValidateTemplate operation: Stack' \ + ' with id Missing top level item Resources to file module does not exist' + assert True diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index 2f8528855..a0f3871c0 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -1,5 +1,8 @@ import boto from boto.ec2.cloudwatch.alarm import MetricAlarm +import boto3 +from datetime import datetime, timedelta +import pytz import sure # noqa from moto import mock_cloudwatch_deprecated diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py old mode 100644 new mode 100755 index e621a642a..40b5eee08 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import boto3 from botocore.exceptions import ClientError +from datetime import datetime, timedelta +import pytz import sure # noqa from moto import mock_cloudwatch @@ -125,18 +127,98 @@ def test_alarm_state(): ) len(resp['MetricAlarms']).should.equal(1) resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1') + resp['MetricAlarms'][0]['StateValue'].should.equal('ALARM') resp = client.describe_alarms( StateValue='OK' ) len(resp['MetricAlarms']).should.equal(1) resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2') + resp['MetricAlarms'][0]['StateValue'].should.equal('OK') # Just for sanity resp = client.describe_alarms() len(resp['MetricAlarms']).should.equal(2) +@mock_cloudwatch +def test_put_metric_data_no_dimensions(): + conn = boto3.client('cloudwatch', region_name='us-east-1') + + conn.put_metric_data( + Namespace='tester', + 
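+        # A single datapoint with no dimensions; list_metrics below should still report it.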
MetricData=[ + dict( + MetricName='metric', + Value=1.5, + ) + ] + ) + + metrics = conn.list_metrics()['Metrics'] + metrics.should.have.length_of(1) + metric = metrics[0] + metric['Namespace'].should.equal('tester') + metric['MetricName'].should.equal('metric') +@mock_cloudwatch +def test_put_metric_data_with_statistics(): + conn = boto3.client('cloudwatch', region_name='us-east-1') + + conn.put_metric_data( + Namespace='tester', + MetricData=[ + dict( + MetricName='statmetric', + Timestamp=datetime(2015, 1, 1), + # no Value to test https://github.com/spulec/moto/issues/1615 + StatisticValues=dict( + SampleCount=123.0, + Sum=123.0, + Minimum=123.0, + Maximum=123.0 + ), + Unit='Milliseconds', + StorageResolution=123 + ) + ] + ) + + metrics = conn.list_metrics()['Metrics'] + metrics.should.have.length_of(1) + metric = metrics[0] + metric['Namespace'].should.equal('tester') + metric['MetricName'].should.equal('statmetric') + # TODO: test statistics - https://github.com/spulec/moto/issues/1615 + +@mock_cloudwatch +def test_get_metric_statistics(): + conn = boto3.client('cloudwatch', region_name='us-east-1') + utc_now = datetime.now(tz=pytz.utc) + + conn.put_metric_data( + Namespace='tester', + MetricData=[ + dict( + MetricName='metric', + Value=1.5, + Timestamp=utc_now + ) + ] + ) + + stats = conn.get_metric_statistics( + Namespace='tester', + MetricName='metric', + StartTime=utc_now - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + Period=60, + Statistics=['SampleCount', 'Sum'] + ) + + stats['Datapoints'].should.have.length_of(1) + datapoint = stats['Datapoints'][0] + datapoint['SampleCount'].should.equal(1.0) + datapoint['Sum'].should.equal(1.5) diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py new file mode 100644 index 000000000..ea9ccbc78 --- /dev/null +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -0,0 +1,97 @@ +from __future__ import unicode_literals + +import boto3 + +from moto import mock_cognitoidentity +import sure # noqa + +from moto.cognitoidentity.utils import get_random_identity_id + + +@mock_cognitoidentity +def test_create_identity_pool(): + conn = boto3.client('cognito-identity', 'us-west-2') + + result = conn.create_identity_pool(IdentityPoolName='TestPool', + AllowUnauthenticatedIdentities=False, + SupportedLoginProviders={'graph.facebook.com': '123456789012345'}, + DeveloperProviderName='devname', + OpenIdConnectProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db'], + CognitoIdentityProviders=[ + { + 'ProviderName': 'testprovider', + 'ClientId': 'CLIENT12345', + 'ServerSideTokenCheck': True + }, + ], + SamlProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db']) + assert result['IdentityPoolId'] != '' + + +# testing a helper function +def test_get_random_identity_id(): + assert len(get_random_identity_id('us-west-2')) > 0 + assert len(get_random_identity_id('us-west-2').split(':')[1]) == 19 + + +@mock_cognitoidentity +def test_get_id(): + # These two do NOT work in server mode. They just don't return the data from the model. + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_id(AccountId='someaccount', + IdentityPoolId='us-west-2:12345', + Logins={ + 'someurl': '12345' + }) + print(result) + assert result.get('IdentityId', "").startswith('us-west-2') or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + + +@mock_cognitoidentity +def test_get_credentials_for_identity(): + # These two do NOT work in server mode. 
They just don't return the data from the model. + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_credentials_for_identity(IdentityId='12345') + + assert result.get('Expiration', 0) > 0 or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + assert result.get('IdentityId') == '12345' or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + + +@mock_cognitoidentity +def test_get_open_id_token_for_developer_identity(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token_for_developer_identity( + IdentityPoolId='us-west-2:12345', + IdentityId='12345', + Logins={ + 'someurl': '12345' + }, + TokenDuration=123 + ) + assert len(result['Token']) > 0 + assert result['IdentityId'] == '12345' + +@mock_cognitoidentity +def test_get_open_id_token_for_developer_identity_when_no_explicit_identity_id(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token_for_developer_identity( + IdentityPoolId='us-west-2:12345', + Logins={ + 'someurl': '12345' + }, + TokenDuration=123 + ) + assert len(result['Token']) > 0 + assert len(result['IdentityId']) > 0 + +@mock_cognitoidentity +def test_get_open_id_token(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token( + IdentityId='12345', + Logins={ + 'someurl': '12345' + } + ) + assert len(result['Token']) > 0 + assert result['IdentityId'] == '12345' diff --git a/tests/test_cognitoidentity/test_server.py b/tests/test_cognitoidentity/test_server.py new file mode 100644 index 000000000..b63d42bc0 --- /dev/null +++ b/tests/test_cognitoidentity/test_server.py @@ -0,0 +1,45 @@ +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_cognitoidentity + +''' +Test the different server responses +''' + + +@mock_cognitoidentity +def test_create_identity_pool(): + + backend = server.create_backend_app("cognito-identity") + test_client = backend.test_client() + + res = test_client.post('/', + data={"IdentityPoolName": "test", "AllowUnauthenticatedIdentities": True}, + headers={ + "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.CreateIdentityPool"}, + ) + + json_data = json.loads(res.data.decode("utf-8")) + assert json_data['IdentityPoolName'] == "test" + + +@mock_cognitoidentity +def test_get_id(): + backend = server.create_backend_app("cognito-identity") + test_client = backend.test_client() + + res = test_client.post('/', + data=json.dumps({'AccountId': 'someaccount', + 'IdentityPoolId': 'us-west-2:12345', + 'Logins': {'someurl': '12345'}}), + headers={ + "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.GetId"}, + ) + + print(res.data) + json_data = json.loads(res.data.decode("utf-8")) + assert ':' in json_data['IdentityId'] diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py new file mode 100644 index 000000000..774ff7621 --- /dev/null +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -0,0 +1,1247 @@ +from __future__ import unicode_literals + +import json +import os +import random +import uuid + +import boto3 +# noinspection PyUnresolvedReferences +import sure # noqa +from botocore.exceptions import ClientError +from jose import jws +from nose.tools import assert_raises + +from moto import mock_cognitoidp + + +@mock_cognitoidp +def test_create_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + name = str(uuid.uuid4()) + value = 
str(uuid.uuid4()) + result = conn.create_user_pool( + PoolName=name, + LambdaConfig={ + "PreSignUp": value + } + ) + + result["UserPool"]["Id"].should_not.be.none + result["UserPool"]["Id"].should.match(r'[\w-]+_[0-9a-zA-Z]+') + result["UserPool"]["Name"].should.equal(name) + result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) + + +@mock_cognitoidp +def test_list_user_pools(): + conn = boto3.client("cognito-idp", "us-west-2") + + name = str(uuid.uuid4()) + conn.create_user_pool(PoolName=name) + result = conn.list_user_pools(MaxResults=10) + result["UserPools"].should.have.length_of(1) + result["UserPools"][0]["Name"].should.equal(name) + + +@mock_cognitoidp +def test_list_user_pools_returns_max_items(): + conn = boto3.client("cognito-idp", "us-west-2") + + # Given 10 user pools + pool_count = 10 + for i in range(pool_count): + conn.create_user_pool(PoolName=str(uuid.uuid4())) + + max_results = 5 + result = conn.list_user_pools(MaxResults=max_results) + result["UserPools"].should.have.length_of(max_results) + result.should.have.key("NextToken") + + +@mock_cognitoidp +def test_list_user_pools_returns_next_tokens(): + conn = boto3.client("cognito-idp", "us-west-2") + + # Given 10 user pool clients + pool_count = 10 + for i in range(pool_count): + conn.create_user_pool(PoolName=str(uuid.uuid4())) + + max_results = 5 + result = conn.list_user_pools(MaxResults=max_results) + result["UserPools"].should.have.length_of(max_results) + result.should.have.key("NextToken") + + next_token = result["NextToken"] + result_2 = conn.list_user_pools(MaxResults=max_results, NextToken=next_token) + result_2["UserPools"].should.have.length_of(max_results) + result_2.shouldnt.have.key("NextToken") + + +@mock_cognitoidp +def test_list_user_pools_when_max_items_more_than_total_items(): + conn = boto3.client("cognito-idp", "us-west-2") + + # Given 10 user pool clients + pool_count = 10 + for i in range(pool_count): + conn.create_user_pool(PoolName=str(uuid.uuid4())) + + max_results = pool_count + 5 + result = conn.list_user_pools(MaxResults=max_results) + result["UserPools"].should.have.length_of(pool_count) + result.shouldnt.have.key("NextToken") + + +@mock_cognitoidp +def test_describe_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_details = conn.create_user_pool( + PoolName=name, + LambdaConfig={ + "PreSignUp": value + } + ) + + result = conn.describe_user_pool(UserPoolId=user_pool_details["UserPool"]["Id"]) + result["UserPool"]["Name"].should.equal(name) + result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) + + +@mock_cognitoidp +def test_delete_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(1) + conn.delete_user_pool(UserPoolId=user_pool_id) + conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(0) + + +@mock_cognitoidp +def test_create_user_pool_domain(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + +@mock_cognitoidp +def test_create_user_pool_domain_custom_domain_config(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = 
str(uuid.uuid4()) + custom_domain_config = { + "CertificateArn": "arn:aws:acm:us-east-1:123456789012:certificate/123456789012", + } + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_user_pool_domain( + UserPoolId=user_pool_id, Domain=domain, CustomDomainConfig=custom_domain_config + ) + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + result["CloudFrontDomain"].should.equal("e2c343b3293ee505.cloudfront.net") + + +@mock_cognitoidp +def test_describe_user_pool_domain(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result = conn.describe_user_pool_domain(Domain=domain) + result["DomainDescription"]["Domain"].should.equal(domain) + result["DomainDescription"]["UserPoolId"].should.equal(user_pool_id) + result["DomainDescription"]["AWSAccountId"].should_not.be.none + + +@mock_cognitoidp +def test_delete_user_pool_domain(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result = conn.delete_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + result = conn.describe_user_pool_domain(Domain=domain) + # This is a surprising behavior of the real service: describing a missing domain comes + # back with status 200 and a DomainDescription of {} + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + result["DomainDescription"].keys().should.have.length_of(0) + + +@mock_cognitoidp +def test_update_user_pool_domain(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = str(uuid.uuid4()) + custom_domain_config = { + "CertificateArn": "arn:aws:acm:us-east-1:123456789012:certificate/123456789012", + } + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result = conn.update_user_pool_domain( + UserPoolId=user_pool_id, Domain=domain, CustomDomainConfig=custom_domain_config + ) + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + result["CloudFrontDomain"].should.equal("e2c343b3293ee505.cloudfront.net") + + +@mock_cognitoidp +def test_create_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=client_name, + CallbackURLs=[value], + ) + + result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) + result["UserPoolClient"]["ClientId"].should_not.be.none + result["UserPoolClient"]["ClientName"].should.equal(client_name) + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) + + +@mock_cognitoidp +def test_list_user_pool_clients(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) + result = 
conn.list_user_pool_clients(UserPoolId=user_pool_id, MaxResults=10) + result["UserPoolClients"].should.have.length_of(1) + result["UserPoolClients"][0]["ClientName"].should.equal(client_name) + + +@mock_cognitoidp +def test_list_user_pool_clients_returns_max_items(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 user pool clients + client_count = 10 + for i in range(client_count): + client_name = str(uuid.uuid4()) + conn.create_user_pool_client(UserPoolId=user_pool_id, + ClientName=client_name) + max_results = 5 + result = conn.list_user_pool_clients(UserPoolId=user_pool_id, + MaxResults=max_results) + result["UserPoolClients"].should.have.length_of(max_results) + result.should.have.key("NextToken") + + +@mock_cognitoidp +def test_list_user_pool_clients_returns_next_tokens(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 user pool clients + client_count = 10 + for i in range(client_count): + client_name = str(uuid.uuid4()) + conn.create_user_pool_client(UserPoolId=user_pool_id, + ClientName=client_name) + max_results = 5 + result = conn.list_user_pool_clients(UserPoolId=user_pool_id, + MaxResults=max_results) + result["UserPoolClients"].should.have.length_of(max_results) + result.should.have.key("NextToken") + + next_token = result["NextToken"] + result_2 = conn.list_user_pool_clients(UserPoolId=user_pool_id, + MaxResults=max_results, + NextToken=next_token) + result_2["UserPoolClients"].should.have.length_of(max_results) + result_2.shouldnt.have.key("NextToken") + + +@mock_cognitoidp +def test_list_user_pool_clients_when_max_items_more_than_total_items(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 user pool clients + client_count = 10 + for i in range(client_count): + client_name = str(uuid.uuid4()) + conn.create_user_pool_client(UserPoolId=user_pool_id, + ClientName=client_name) + max_results = client_count + 5 + result = conn.list_user_pool_clients(UserPoolId=user_pool_id, + MaxResults=max_results) + result["UserPoolClients"].should.have.length_of(client_count) + result.shouldnt.have.key("NextToken") + + +@mock_cognitoidp +def test_describe_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=client_name, + CallbackURLs=[value], + ) + + result = conn.describe_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ) + + result["UserPoolClient"]["ClientName"].should.equal(client_name) + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) + + +@mock_cognitoidp +def test_update_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + old_client_name = str(uuid.uuid4()) + new_client_name = str(uuid.uuid4()) + old_value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=old_client_name, + CallbackURLs=[old_value], + ) 
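+    # The update below is expected to replace both the client name and the callback
+    # URLs; the assertions that follow check that only the new values come back.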
+ + result = conn.update_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ClientName=new_client_name, + CallbackURLs=[new_value], + ) + + result["UserPoolClient"]["ClientName"].should.equal(new_client_name) + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) + + +@mock_cognitoidp +def test_delete_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + ) + + conn.delete_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ) + + caught = False + try: + conn.describe_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ) + except conn.exceptions.ResourceNotFoundException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_create_identity_provider(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) + result["IdentityProvider"]["ProviderName"].should.equal(provider_name) + result["IdentityProvider"]["ProviderType"].should.equal(provider_type) + result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) + + +@mock_cognitoidp +def test_list_identity_providers(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={}, + ) + + result = conn.list_identity_providers( + UserPoolId=user_pool_id, + MaxResults=10, + ) + + result["Providers"].should.have.length_of(1) + result["Providers"][0]["ProviderName"].should.equal(provider_name) + result["Providers"][0]["ProviderType"].should.equal(provider_type) + + +@mock_cognitoidp +def test_list_identity_providers_returns_max_items(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 identity providers linked to a user pool + identity_provider_count = 10 + for i in range(identity_provider_count): + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={}, + ) + + max_results = 5 + result = conn.list_identity_providers(UserPoolId=user_pool_id, + MaxResults=max_results) + result["Providers"].should.have.length_of(max_results) + result.should.have.key("NextToken") + + +@mock_cognitoidp +def test_list_identity_providers_returns_next_tokens(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 identity providers linked to a user pool + 
identity_provider_count = 10 + for i in range(identity_provider_count): + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={}, + ) + + max_results = 5 + result = conn.list_identity_providers(UserPoolId=user_pool_id, MaxResults=max_results) + result["Providers"].should.have.length_of(max_results) + result.should.have.key("NextToken") + + next_token = result["NextToken"] + result_2 = conn.list_identity_providers(UserPoolId=user_pool_id, + MaxResults=max_results, + NextToken=next_token) + result_2["Providers"].should.have.length_of(max_results) + result_2.shouldnt.have.key("NextToken") + + +@mock_cognitoidp +def test_list_identity_providers_when_max_items_more_than_total_items(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 identity providers linked to a user pool + identity_provider_count = 10 + for i in range(identity_provider_count): + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={}, + ) + + max_results = identity_provider_count + 5 + result = conn.list_identity_providers(UserPoolId=user_pool_id, MaxResults=max_results) + result["Providers"].should.have.length_of(identity_provider_count) + result.shouldnt.have.key("NextToken") + + +@mock_cognitoidp +def test_describe_identity_providers(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + result = conn.describe_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ) + + result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) + result["IdentityProvider"]["ProviderName"].should.equal(provider_name) + result["IdentityProvider"]["ProviderType"].should.equal(provider_type) + result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) + + +@mock_cognitoidp +def test_update_identity_provider(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + result = conn.update_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderDetails={ + "thing": new_value + }, + ) + + result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) + result["IdentityProvider"]["ProviderName"].should.equal(provider_name) + result["IdentityProvider"]["ProviderType"].should.equal(provider_type) + result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(new_value) + + +@mock_cognitoidp +def test_update_identity_provider_no_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + new_value = str(uuid.uuid4()) + + with 
assert_raises(conn.exceptions.ResourceNotFoundException) as cm: + conn.update_identity_provider( + UserPoolId="foo", + ProviderName="bar", + ProviderDetails={ + "thing": new_value + }, + ) + + cm.exception.operation_name.should.equal('UpdateIdentityProvider') + cm.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + +@mock_cognitoidp +def test_update_identity_provider_no_identity_provider(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + with assert_raises(conn.exceptions.ResourceNotFoundException) as cm: + conn.update_identity_provider( + UserPoolId=user_pool_id, + ProviderName="foo", + ProviderDetails={ + "thing": new_value + }, + ) + + cm.exception.operation_name.should.equal('UpdateIdentityProvider') + cm.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + +@mock_cognitoidp +def test_delete_identity_providers(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + conn.delete_identity_provider(UserPoolId=user_pool_id, ProviderName=provider_name) + + caught = False + try: + conn.describe_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ) + except conn.exceptions.ResourceNotFoundException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_create_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + description = str(uuid.uuid4()) + role_arn = "arn:aws:iam:::role/my-iam-role" + precedence = random.randint(0, 100000) + + result = conn.create_group( + GroupName=group_name, + UserPoolId=user_pool_id, + Description=description, + RoleArn=role_arn, + Precedence=precedence, + ) + + result["Group"]["GroupName"].should.equal(group_name) + result["Group"]["UserPoolId"].should.equal(user_pool_id) + result["Group"]["Description"].should.equal(description) + result["Group"]["RoleArn"].should.equal(role_arn) + result["Group"]["Precedence"].should.equal(precedence) + result["Group"]["LastModifiedDate"].should.be.a("datetime.datetime") + result["Group"]["CreationDate"].should.be.a("datetime.datetime") + + +@mock_cognitoidp +def test_create_group_with_duplicate_name_raises_error(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + with assert_raises(ClientError) as cm: + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + cm.exception.operation_name.should.equal('CreateGroup') + cm.exception.response['Error']['Code'].should.equal('GroupExistsException') + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + +@mock_cognitoidp 
+def test_get_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + result = conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) + + result["Group"]["GroupName"].should.equal(group_name) + result["Group"]["UserPoolId"].should.equal(user_pool_id) + result["Group"]["LastModifiedDate"].should.be.a("datetime.datetime") + result["Group"]["CreationDate"].should.be.a("datetime.datetime") + + +@mock_cognitoidp +def test_list_groups(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + result = conn.list_groups(UserPoolId=user_pool_id) + + result["Groups"].should.have.length_of(1) + result["Groups"][0]["GroupName"].should.equal(group_name) + + +@mock_cognitoidp +def test_delete_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + result = conn.delete_group(GroupName=group_name, UserPoolId=user_pool_id) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + + with assert_raises(ClientError) as cm: + conn.get_group(GroupName=group_name, UserPoolId=user_pool_id) + cm.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + + +@mock_cognitoidp +def test_admin_add_user_to_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + result = conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + + +@mock_cognitoidp +def test_admin_add_user_to_group_again_is_noop(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + + +@mock_cognitoidp +def test_list_users_in_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + + result = conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name) + + result["Users"].should.have.length_of(1) + result["Users"][0]["Username"].should.equal(username) + + 
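+# A minimal sketch (not part of the original change set) of the pagination contract the
+# list_* tests in this file exercise: keep passing the returned token back until a
+# response carries no token. The helper name "drain_list_users" and the page size are
+# hypothetical; the boto3 calls match the list_users API used in the tests below.
+def drain_list_users(conn, user_pool_id, page_size=5):
+    users = []
+    token = None
+    while True:
+        kwargs = {"UserPoolId": user_pool_id, "Limit": page_size}
+        if token:
+            kwargs["PaginationToken"] = token
+        page = conn.list_users(**kwargs)
+        users.extend(page["Users"])
+        token = page.get("PaginationToken")
+        if not token:  # the final page carries no token
+            return users
+# e.g. drain_list_users(conn, user_pool_id) would collect all users across pages.
+
+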
+@mock_cognitoidp +def test_list_users_in_group_ignores_deleted_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + username2 = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username2) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username2, GroupName=group_name) + conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) + + result = conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name) + + result["Users"].should.have.length_of(1) + result["Users"][0]["Username"].should.equal(username2) + + +@mock_cognitoidp +def test_admin_list_groups_for_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + + result = conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) + + result["Groups"].should.have.length_of(1) + result["Groups"][0]["GroupName"].should.equal(group_name) + + +@mock_cognitoidp +def test_admin_list_groups_for_user_ignores_deleted_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + group_name2 = str(uuid.uuid4()) + conn.create_group(GroupName=group_name2, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name2) + conn.delete_group(GroupName=group_name, UserPoolId=user_pool_id) + + result = conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) + + result["Groups"].should.have.length_of(1) + result["Groups"][0]["GroupName"].should.equal(group_name2) + + +@mock_cognitoidp +def test_admin_remove_user_from_group(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + + result = conn.admin_remove_user_from_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + conn.list_users_in_group(UserPoolId=user_pool_id, GroupName=group_name) \ + ["Users"].should.have.length_of(0) + conn.admin_list_groups_for_user(Username=username, UserPoolId=user_pool_id) \ + 
["Groups"].should.have.length_of(0) + + +@mock_cognitoidp +def test_admin_remove_user_from_group_again_is_noop(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + group_name = str(uuid.uuid4()) + conn.create_group(GroupName=group_name, UserPoolId=user_pool_id) + + username = str(uuid.uuid4()) + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + conn.admin_add_user_to_group(UserPoolId=user_pool_id, Username=username, GroupName=group_name) + + +@mock_cognitoidp +def test_admin_create_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + {"Name": "thing", "Value": value} + ], + ) + + result["User"]["Username"].should.equal(username) + result["User"]["UserStatus"].should.equal("FORCE_CHANGE_PASSWORD") + result["User"]["Attributes"].should.have.length_of(1) + result["User"]["Attributes"][0]["Name"].should.equal("thing") + result["User"]["Attributes"][0]["Value"].should.equal(value) + result["User"]["Enabled"].should.equal(True) + + +@mock_cognitoidp +def test_admin_get_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + {"Name": "thing", "Value": value} + ], + ) + + result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + result["Username"].should.equal(username) + result["UserAttributes"].should.have.length_of(1) + result["UserAttributes"][0]["Name"].should.equal("thing") + result["UserAttributes"][0]["Value"].should.equal(value) + + +@mock_cognitoidp +def test_admin_get_missing_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + caught = False + try: + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + except conn.exceptions.UserNotFoundException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_list_users(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + result = conn.list_users(UserPoolId=user_pool_id) + result["Users"].should.have.length_of(1) + result["Users"][0]["Username"].should.equal(username) + + +@mock_cognitoidp +def test_list_users_returns_limit_items(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 users + user_count = 10 + for i in range(user_count): + conn.admin_create_user(UserPoolId=user_pool_id, + Username=str(uuid.uuid4())) + max_results = 5 + result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results) + result["Users"].should.have.length_of(max_results) + result.should.have.key("PaginationToken") + + +@mock_cognitoidp +def test_list_users_returns_pagination_tokens(): + conn = 
boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 users + user_count = 10 + for i in range(user_count): + conn.admin_create_user(UserPoolId=user_pool_id, + Username=str(uuid.uuid4())) + + max_results = 5 + result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results) + result["Users"].should.have.length_of(max_results) + result.should.have.key("PaginationToken") + + next_token = result["PaginationToken"] + result_2 = conn.list_users(UserPoolId=user_pool_id, + Limit=max_results, PaginationToken=next_token) + result_2["Users"].should.have.length_of(max_results) + result_2.shouldnt.have.key("PaginationToken") + + +@mock_cognitoidp +def test_list_users_when_limit_more_than_total_items(): + conn = boto3.client("cognito-idp", "us-west-2") + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + # Given 10 users + user_count = 10 + for i in range(user_count): + conn.admin_create_user(UserPoolId=user_pool_id, + Username=str(uuid.uuid4())) + + max_results = user_count + 5 + result = conn.list_users(UserPoolId=user_pool_id, Limit=max_results) + result["Users"].should.have.length_of(user_count) + result.shouldnt.have.key("PaginationToken") + + +@mock_cognitoidp +def test_admin_disable_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + result = conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \ + ["Enabled"].should.equal(False) + + +@mock_cognitoidp +def test_admin_enable_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) + + result = conn.admin_enable_user(UserPoolId=user_pool_id, Username=username) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \ + ["Enabled"].should.equal(True) + + +@mock_cognitoidp +def test_admin_delete_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) + + caught = False + try: + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + except conn.exceptions.UserNotFoundException: + caught = True + + caught.should.be.true + + +def authentication_flow(conn): + username = str(uuid.uuid4()) + temporary_password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + user_attribute_name = str(uuid.uuid4()) + user_attribute_value = str(uuid.uuid4()) + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + ReadAttributes=[user_attribute_name] + )["UserPoolClient"]["ClientId"] + + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + 
TemporaryPassword=temporary_password, + UserAttributes=[{ + 'Name': user_attribute_name, + 'Value': user_attribute_value + }] + ) + + result = conn.admin_initiate_auth( + UserPoolId=user_pool_id, + ClientId=client_id, + AuthFlow="ADMIN_NO_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "PASSWORD": temporary_password + }, + ) + + # A newly created user is forced to set a new password + result["ChallengeName"].should.equal("NEW_PASSWORD_REQUIRED") + result["Session"].should_not.be.none + + # This sets a new password and logs the user in (creates tokens) + new_password = str(uuid.uuid4()) + result = conn.respond_to_auth_challenge( + Session=result["Session"], + ClientId=client_id, + ChallengeName="NEW_PASSWORD_REQUIRED", + ChallengeResponses={ + "USERNAME": username, + "NEW_PASSWORD": new_password + } + ) + + result["AuthenticationResult"]["IdToken"].should_not.be.none + result["AuthenticationResult"]["AccessToken"].should_not.be.none + + return { + "user_pool_id": user_pool_id, + "client_id": client_id, + "id_token": result["AuthenticationResult"]["IdToken"], + "access_token": result["AuthenticationResult"]["AccessToken"], + "username": username, + "password": new_password, + "additional_fields": { + user_attribute_name: user_attribute_value + } + } + + +@mock_cognitoidp +def test_authentication_flow(): + conn = boto3.client("cognito-idp", "us-west-2") + + authentication_flow(conn) + + +@mock_cognitoidp +def test_token_legitimacy(): + conn = boto3.client("cognito-idp", "us-west-2") + + path = "../../moto/cognitoidp/resources/jwks-public.json" + with open(os.path.join(os.path.dirname(__file__), path)) as f: + json_web_key = json.loads(f.read())["keys"][0] + + outputs = authentication_flow(conn) + id_token = outputs["id_token"] + access_token = outputs["access_token"] + client_id = outputs["client_id"] + issuer = "https://cognito-idp.us-west-2.amazonaws.com/{}".format(outputs["user_pool_id"]) + id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256")) + id_claims["iss"].should.equal(issuer) + id_claims["aud"].should.equal(client_id) + access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256")) + access_claims["iss"].should.equal(issuer) + access_claims["aud"].should.equal(client_id) + for k, v in outputs["additional_fields"].items(): + access_claims[k].should.equal(v) + + +@mock_cognitoidp +def test_change_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + outputs = authentication_flow(conn) + + # Take this opportunity to test change_password, which requires an access token. + newer_password = str(uuid.uuid4()) + conn.change_password( + AccessToken=outputs["access_token"], + PreviousPassword=outputs["password"], + ProposedPassword=newer_password, + ) + + # Log in again, which should succeed without a challenge because the user is no + # longer in the force-new-password state. 
+ result = conn.admin_initiate_auth( + UserPoolId=outputs["user_pool_id"], + ClientId=outputs["client_id"], + AuthFlow="ADMIN_NO_SRP_AUTH", + AuthParameters={ + "USERNAME": outputs["username"], + "PASSWORD": newer_password, + }, + ) + + result["AuthenticationResult"].should_not.be.none + + +@mock_cognitoidp +def test_forgot_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + result = conn.forgot_password(ClientId=str(uuid.uuid4()), Username=str(uuid.uuid4())) + result["CodeDeliveryDetails"].should_not.be.none + + +@mock_cognitoidp +def test_confirm_forgot_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + )["UserPoolClient"]["ClientId"] + + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + TemporaryPassword=str(uuid.uuid4()), + ) + + conn.confirm_forgot_password( + ClientId=client_id, + Username=username, + ConfirmationCode=str(uuid.uuid4()), + Password=str(uuid.uuid4()), + ) + +@mock_cognitoidp +def test_admin_update_user_attributes(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + { + 'Name': 'family_name', + 'Value': 'Doe', + }, + { + 'Name': 'given_name', + 'Value': 'John', + } + ] + ) + + conn.admin_update_user_attributes( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + { + 'Name': 'family_name', + 'Value': 'Doe', + }, + { + 'Name': 'given_name', + 'Value': 'Jane', + } + ] + ) + + user = conn.admin_get_user( + UserPoolId=user_pool_id, + Username=username + ) + attributes = user['UserAttributes'] + attributes.should.be.a(list) + for attr in attributes: + val = attr['Value'] + if attr['Name'] == 'family_name': + val.should.equal('Doe') + elif attr['Name'] == 'given_name': + val.should.equal('Jane') diff --git a/tests/test_config/test_config.py b/tests/test_config/test_config.py new file mode 100644 index 000000000..96c62455c --- /dev/null +++ b/tests/test_config/test_config.py @@ -0,0 +1,491 @@ +from datetime import datetime, timedelta + +import boto3 +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto.config import mock_config + + +@mock_config +def test_put_configuration_recorder(): + client = boto3.client('config', region_name='us-west-2') + + # Try without a name supplied: + with assert_raises(ClientError) as ce: + client.put_configuration_recorder(ConfigurationRecorder={'roleARN': 'somearn'}) + assert ce.exception.response['Error']['Code'] == 'InvalidConfigurationRecorderNameException' + assert 'is not valid, blank string.' 
in ce.exception.response['Error']['Message'] + + # Try with a really long name: + with assert_raises(ClientError) as ce: + client.put_configuration_recorder(ConfigurationRecorder={'name': 'a' * 257, 'roleARN': 'somearn'}) + assert ce.exception.response['Error']['Code'] == 'ValidationException' + assert 'Member must have length less than or equal to 256' in ce.exception.response['Error']['Message'] + + # With resource types and flags set to True: + bad_groups = [ + {'allSupported': True, 'includeGlobalResourceTypes': True, 'resourceTypes': ['item']}, + {'allSupported': False, 'includeGlobalResourceTypes': True, 'resourceTypes': ['item']}, + {'allSupported': True, 'includeGlobalResourceTypes': False, 'resourceTypes': ['item']}, + {'allSupported': False, 'includeGlobalResourceTypes': False, 'resourceTypes': []}, + {'includeGlobalResourceTypes': False, 'resourceTypes': []}, + {'includeGlobalResourceTypes': True}, + {'resourceTypes': []}, + {} + ] + + for bg in bad_groups: + with assert_raises(ClientError) as ce: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'default', + 'roleARN': 'somearn', + 'recordingGroup': bg + }) + assert ce.exception.response['Error']['Code'] == 'InvalidRecordingGroupException' + assert ce.exception.response['Error']['Message'] == 'The recording group provided is not valid' + + # With an invalid Resource Type: + with assert_raises(ClientError) as ce: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'default', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + # 2 good, and 2 bad: + 'resourceTypes': ['AWS::EC2::Volume', 'LOLNO', 'AWS::EC2::VPC', 'LOLSTILLNO'] + } + }) + assert ce.exception.response['Error']['Code'] == 'ValidationException' + assert "2 validation error detected: Value '['LOLNO', 'LOLSTILLNO']" in str(ce.exception.response['Error']['Message']) + assert 'AWS::EC2::Instance' in ce.exception.response['Error']['Message'] + + # Create a proper one: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + result = client.describe_configuration_recorders()['ConfigurationRecorders'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert result[0]['roleARN'] == 'somearn' + assert not result[0]['recordingGroup']['allSupported'] + assert not result[0]['recordingGroup']['includeGlobalResourceTypes'] + assert len(result[0]['recordingGroup']['resourceTypes']) == 2 + assert 'AWS::EC2::Volume' in result[0]['recordingGroup']['resourceTypes'] \ + and 'AWS::EC2::VPC' in result[0]['recordingGroup']['resourceTypes'] + + # Now update the configuration recorder: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': True, + 'includeGlobalResourceTypes': True + } + }) + result = client.describe_configuration_recorders()['ConfigurationRecorders'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert result[0]['roleARN'] == 'somearn' + assert result[0]['recordingGroup']['allSupported'] + assert result[0]['recordingGroup']['includeGlobalResourceTypes'] + assert len(result[0]['recordingGroup']['resourceTypes']) == 0 + + # With a default recording group (i.e. 
lacking one) + client.put_configuration_recorder(ConfigurationRecorder={'name': 'testrecorder', 'roleARN': 'somearn'}) + result = client.describe_configuration_recorders()['ConfigurationRecorders'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert result[0]['roleARN'] == 'somearn' + assert result[0]['recordingGroup']['allSupported'] + assert not result[0]['recordingGroup']['includeGlobalResourceTypes'] + assert not result[0]['recordingGroup'].get('resourceTypes') + + # Can currently only have exactly 1 Config Recorder in an account/region: + with assert_raises(ClientError) as ce: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'someotherrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + } + }) + assert ce.exception.response['Error']['Code'] == 'MaxNumberOfConfigurationRecordersExceededException' + assert "maximum number of configuration recorders: 1 is reached." in ce.exception.response['Error']['Message'] + + +@mock_config +def test_describe_configurations(): + client = boto3.client('config', region_name='us-west-2') + + # Without any configurations: + result = client.describe_configuration_recorders() + assert not result['ConfigurationRecorders'] + + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + result = client.describe_configuration_recorders()['ConfigurationRecorders'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert result[0]['roleARN'] == 'somearn' + assert not result[0]['recordingGroup']['allSupported'] + assert not result[0]['recordingGroup']['includeGlobalResourceTypes'] + assert len(result[0]['recordingGroup']['resourceTypes']) == 2 + assert 'AWS::EC2::Volume' in result[0]['recordingGroup']['resourceTypes'] \ + and 'AWS::EC2::VPC' in result[0]['recordingGroup']['resourceTypes'] + + # Specify an incorrect name: + with assert_raises(ClientError) as ce: + client.describe_configuration_recorders(ConfigurationRecorderNames=['wrong']) + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + assert 'wrong' in ce.exception.response['Error']['Message'] + + # And with both a good and wrong name: + with assert_raises(ClientError) as ce: + client.describe_configuration_recorders(ConfigurationRecorderNames=['testrecorder', 'wrong']) + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + assert 'wrong' in ce.exception.response['Error']['Message'] + + +@mock_config +def test_delivery_channels(): + client = boto3.client('config', region_name='us-west-2') + + # Try without a config recorder: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={}) + assert ce.exception.response['Error']['Code'] == 'NoAvailableConfigurationRecorderException' + assert ce.exception.response['Error']['Message'] == 'Configuration recorder is not available to ' \ + 'put delivery channel.' 
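+
+    # With no recorder in place there can be no channels yet either; describe
+    # should return an empty list (the same check test_describe_delivery_channels
+    # makes below):
+    assert not client.describe_delivery_channels()['DeliveryChannels']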
+ + # Create a config recorder to continue testing: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Try without a name supplied: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={}) + assert ce.exception.response['Error']['Code'] == 'InvalidDeliveryChannelNameException' + assert 'is not valid, blank string.' in ce.exception.response['Error']['Message'] + + # Try with a really long name: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={'name': 'a' * 257}) + assert ce.exception.response['Error']['Code'] == 'ValidationException' + assert 'Member must have length less than or equal to 256' in ce.exception.response['Error']['Message'] + + # Without specifying a bucket name: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel'}) + assert ce.exception.response['Error']['Code'] == 'NoSuchBucketException' + assert ce.exception.response['Error']['Message'] == 'Cannot find a S3 bucket with an empty bucket name.' + + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': ''}) + assert ce.exception.response['Error']['Code'] == 'NoSuchBucketException' + assert ce.exception.response['Error']['Message'] == 'Cannot find a S3 bucket with an empty bucket name.' + + # With an empty string for the S3 key prefix: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={ + 'name': 'testchannel', 's3BucketName': 'somebucket', 's3KeyPrefix': ''}) + assert ce.exception.response['Error']['Code'] == 'InvalidS3KeyPrefixException' + assert 'empty s3 key prefix.' 
in ce.exception.response['Error']['Message'] + + # With an empty string for the SNS ARN: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={ + 'name': 'testchannel', 's3BucketName': 'somebucket', 'snsTopicARN': ''}) + assert ce.exception.response['Error']['Code'] == 'InvalidSNSTopicARNException' + assert 'The sns topic arn' in ce.exception.response['Error']['Message'] + + # With an invalid delivery frequency: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={ + 'name': 'testchannel', + 's3BucketName': 'somebucket', + 'configSnapshotDeliveryProperties': {'deliveryFrequency': 'WRONG'} + }) + assert ce.exception.response['Error']['Code'] == 'InvalidDeliveryFrequency' + assert 'WRONG' in ce.exception.response['Error']['Message'] + assert 'TwentyFour_Hours' in ce.exception.response['Error']['Message'] + + # Create a proper one: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'}) + result = client.describe_delivery_channels()['DeliveryChannels'] + assert len(result) == 1 + assert len(result[0].keys()) == 2 + assert result[0]['name'] == 'testchannel' + assert result[0]['s3BucketName'] == 'somebucket' + + # Overwrite it with another proper configuration: + client.put_delivery_channel(DeliveryChannel={ + 'name': 'testchannel', + 's3BucketName': 'somebucket', + 'snsTopicARN': 'sometopicarn', + 'configSnapshotDeliveryProperties': {'deliveryFrequency': 'TwentyFour_Hours'} + }) + result = client.describe_delivery_channels()['DeliveryChannels'] + assert len(result) == 1 + assert len(result[0].keys()) == 4 + assert result[0]['name'] == 'testchannel' + assert result[0]['s3BucketName'] == 'somebucket' + assert result[0]['snsTopicARN'] == 'sometopicarn' + assert result[0]['configSnapshotDeliveryProperties']['deliveryFrequency'] == 'TwentyFour_Hours' + + # Can only have 1: + with assert_raises(ClientError) as ce: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel2', 's3BucketName': 'somebucket'}) + assert ce.exception.response['Error']['Code'] == 'MaxNumberOfDeliveryChannelsExceededException' + assert 'because the maximum number of delivery channels: 1 is reached.' 
in ce.exception.response['Error']['Message'] + + +@mock_config +def test_describe_delivery_channels(): + client = boto3.client('config', region_name='us-west-2') + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Without any channels: + result = client.describe_delivery_channels() + assert not result['DeliveryChannels'] + + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'}) + result = client.describe_delivery_channels()['DeliveryChannels'] + assert len(result) == 1 + assert len(result[0].keys()) == 2 + assert result[0]['name'] == 'testchannel' + assert result[0]['s3BucketName'] == 'somebucket' + + # Overwrite it with another proper configuration: + client.put_delivery_channel(DeliveryChannel={ + 'name': 'testchannel', + 's3BucketName': 'somebucket', + 'snsTopicARN': 'sometopicarn', + 'configSnapshotDeliveryProperties': {'deliveryFrequency': 'TwentyFour_Hours'} + }) + result = client.describe_delivery_channels()['DeliveryChannels'] + assert len(result) == 1 + assert len(result[0].keys()) == 4 + assert result[0]['name'] == 'testchannel' + assert result[0]['s3BucketName'] == 'somebucket' + assert result[0]['snsTopicARN'] == 'sometopicarn' + assert result[0]['configSnapshotDeliveryProperties']['deliveryFrequency'] == 'TwentyFour_Hours' + + # Specify an incorrect name: + with assert_raises(ClientError) as ce: + client.describe_delivery_channels(DeliveryChannelNames=['wrong']) + assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException' + assert 'wrong' in ce.exception.response['Error']['Message'] + + # And with both a good and wrong name: + with assert_raises(ClientError) as ce: + client.describe_delivery_channels(DeliveryChannelNames=['testchannel', 'wrong']) + assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException' + assert 'wrong' in ce.exception.response['Error']['Message'] + + +@mock_config +def test_start_configuration_recorder(): + client = boto3.client('config', region_name='us-west-2') + + # Without a config recorder: + with assert_raises(ClientError) as ce: + client.start_configuration_recorder(ConfigurationRecorderName='testrecorder') + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + + # Make the config recorder; + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Without a delivery channel: + with assert_raises(ClientError) as ce: + client.start_configuration_recorder(ConfigurationRecorderName='testrecorder') + assert ce.exception.response['Error']['Code'] == 'NoAvailableDeliveryChannelException' + + # Make the delivery channel: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'}) + + # Start it: + client.start_configuration_recorder(ConfigurationRecorderName='testrecorder') + + # Verify it's enabled: + result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus'] + lower_bound = (datetime.utcnow() - timedelta(minutes=5)) + assert result[0]['recording'] + assert result[0]['lastStatus'] == 'PENDING' + assert lower_bound < 
result[0]['lastStartTime'].replace(tzinfo=None) <= datetime.utcnow() + assert lower_bound < result[0]['lastStatusChangeTime'].replace(tzinfo=None) <= datetime.utcnow() + + +@mock_config +def test_stop_configuration_recorder(): + client = boto3.client('config', region_name='us-west-2') + + # Without a config recorder: + with assert_raises(ClientError) as ce: + client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder') + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + + # Make the config recorder; + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Make the delivery channel for creation: + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'}) + + # Start it: + client.start_configuration_recorder(ConfigurationRecorderName='testrecorder') + client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder') + + # Verify it's disabled: + result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus'] + lower_bound = (datetime.utcnow() - timedelta(minutes=5)) + assert not result[0]['recording'] + assert result[0]['lastStatus'] == 'PENDING' + assert lower_bound < result[0]['lastStartTime'].replace(tzinfo=None) <= datetime.utcnow() + assert lower_bound < result[0]['lastStopTime'].replace(tzinfo=None) <= datetime.utcnow() + assert lower_bound < result[0]['lastStatusChangeTime'].replace(tzinfo=None) <= datetime.utcnow() + + +@mock_config +def test_describe_configuration_recorder_status(): + client = boto3.client('config', region_name='us-west-2') + + # Without any: + result = client.describe_configuration_recorder_status() + assert not result['ConfigurationRecordersStatus'] + + # Make the config recorder; + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Without specifying a config recorder: + result = client.describe_configuration_recorder_status()['ConfigurationRecordersStatus'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert not result[0]['recording'] + + # With a proper name: + result = client.describe_configuration_recorder_status( + ConfigurationRecorderNames=['testrecorder'])['ConfigurationRecordersStatus'] + assert len(result) == 1 + assert result[0]['name'] == 'testrecorder' + assert not result[0]['recording'] + + # Invalid name: + with assert_raises(ClientError) as ce: + client.describe_configuration_recorder_status(ConfigurationRecorderNames=['testrecorder', 'wrong']) + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + assert 'wrong' in ce.exception.response['Error']['Message'] + + +@mock_config +def test_delete_configuration_recorder(): + client = boto3.client('config', region_name='us-west-2') + + # Make the config recorder; + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + + # Delete it: + 
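+    # It should be listed before the deletion (exactly one recorder per
+    # account/region, as enforced above):
+    assert len(client.describe_configuration_recorders()['ConfigurationRecorders']) == 1
+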
client.delete_configuration_recorder(ConfigurationRecorderName='testrecorder') + + # Try again -- it should be deleted: + with assert_raises(ClientError) as ce: + client.delete_configuration_recorder(ConfigurationRecorderName='testrecorder') + assert ce.exception.response['Error']['Code'] == 'NoSuchConfigurationRecorderException' + + +@mock_config +def test_delete_delivery_channel(): + client = boto3.client('config', region_name='us-west-2') + + # Need a recorder to test the constraint on recording being enabled: + client.put_configuration_recorder(ConfigurationRecorder={ + 'name': 'testrecorder', + 'roleARN': 'somearn', + 'recordingGroup': { + 'allSupported': False, + 'includeGlobalResourceTypes': False, + 'resourceTypes': ['AWS::EC2::Volume', 'AWS::EC2::VPC'] + } + }) + client.put_delivery_channel(DeliveryChannel={'name': 'testchannel', 's3BucketName': 'somebucket'}) + client.start_configuration_recorder(ConfigurationRecorderName='testrecorder') + + # With the recorder enabled: + with assert_raises(ClientError) as ce: + client.delete_delivery_channel(DeliveryChannelName='testchannel') + assert ce.exception.response['Error']['Code'] == 'LastDeliveryChannelDeleteFailedException' + assert 'because there is a running configuration recorder.' in ce.exception.response['Error']['Message'] + + # Stop recording: + client.stop_configuration_recorder(ConfigurationRecorderName='testrecorder') + + # Try again: + client.delete_delivery_channel(DeliveryChannelName='testchannel') + + # Verify: + with assert_raises(ClientError) as ce: + client.delete_delivery_channel(DeliveryChannelName='testchannel') + assert ce.exception.response['Error']['Code'] == 'NoSuchDeliveryChannelException' diff --git a/tests/test_core/test_context_manager.py b/tests/test_core/test_context_manager.py new file mode 100644 index 000000000..4824e021f --- /dev/null +++ b/tests/test_core/test_context_manager.py @@ -0,0 +1,12 @@ +import sure # noqa +import boto3 +from moto import mock_sqs, settings + + +def test_context_manager_returns_mock(): + with mock_sqs() as sqs_mock: + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="queue1") + + if not settings.TEST_SERVER_MODE: + list(sqs_mock.backends['us-west-1'].queues.keys()).should.equal(['queue1']) diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 9e3638cc2..5d2f6a4ef 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -85,3 +85,14 @@ class TesterWithSetup(unittest.TestCase): def test_still_the_same(self): bucket = self.conn.get_bucket('mybucket') bucket.name.should.equal("mybucket") + + +@mock_s3_deprecated +class TesterWithStaticmethod(object): + + @staticmethod + def static(*args): + assert not args or not isinstance(args[0], TesterWithStaticmethod) + + def test_no_instance_sent_to_staticmethod(self): + self.static() diff --git a/tests/test_core/test_responses.py b/tests/test_core/test_responses.py index c3cc27aef..d0f672ab8 100644 --- a/tests/test_core/test_responses.py +++ b/tests/test_core/test_responses.py @@ -2,7 +2,9 @@ from __future__ import unicode_literals import sure # noqa -from moto.core.responses import AWSServiceSpec +from botocore.awsrequest import AWSPreparedRequest + +from moto.core.responses import AWSServiceSpec, BaseResponse from moto.core.responses import flatten_json_request_body @@ -79,3 +81,9 @@ def test_flatten_json_request_body(): i += 1 key = keyfmt.format(idx + 1, i) 
props.should.equal(body['Configurations'][idx]['Properties']) + + +def test_parse_qs_unicode_decode_error(): + body = b'{"key": "%D0"}, "C": "#0 = :0"}' + request = AWSPreparedRequest('GET', 'http://request', {'foo': 'bar'}, body, False) + BaseResponse().setup_class(request, request.url, request.headers) diff --git a/tests/test_core/test_socket.py b/tests/test_core/test_socket.py new file mode 100644 index 000000000..2e73d7b5f --- /dev/null +++ b/tests/test_core/test_socket.py @@ -0,0 +1,48 @@ +import unittest +from moto import mock_dynamodb2_deprecated, mock_dynamodb2 +import socket + +from six import PY3 + + +class TestSocketPair(unittest.TestCase): + + @mock_dynamodb2_deprecated + def test_asyncio_deprecated(self): + if PY3: + self.assertIn( + 'moto.packages.httpretty.core.fakesock.socket', + str(socket.socket), + 'Our mock should be present' + ) + import asyncio + self.assertIsNotNone(asyncio.get_event_loop()) + + @mock_dynamodb2_deprecated + def test_socket_pair_deprecated(self): + + # In Python2, the fakesocket is not set, for some reason. + if PY3: + self.assertIn( + 'moto.packages.httpretty.core.fakesock.socket', + str(socket.socket), + 'Our mock should be present' + ) + a, b = socket.socketpair() + self.assertIsNotNone(a) + self.assertIsNotNone(b) + if a: + a.close() + if b: + b.close() + + + @mock_dynamodb2 + def test_socket_pair(self): + a, b = socket.socketpair() + self.assertIsNotNone(a) + self.assertIsNotNone(b) + if a: + a.close() + if b: + b.close() diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 17c5310d4..7746cf66b 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1,16 +1,17 @@ from __future__ import unicode_literals, print_function +from decimal import Decimal + import six import boto import boto3 -from boto3.dynamodb.conditions import Attr +from boto3.dynamodb.conditions import Attr, Key import sure # noqa import requests from moto import mock_dynamodb2, mock_dynamodb2_deprecated from moto.dynamodb2 import dynamodb_backend2 from boto.exception import JSONResponseError from botocore.exceptions import ClientError -from boto3.dynamodb.conditions import Key from tests.helpers import requires_boto_gte import tests.backport_assert_raises @@ -200,6 +201,48 @@ def test_item_add_empty_string_exception(): ) +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_item_with_empty_string_exception(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' 
}, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + + with assert_raises(ClientError) as ex: + conn.update_item( + TableName=name, + Key={ + 'forum_name': { 'S': 'LOLCat Forum'}, + }, + UpdateExpression='set Body=:Body', + ExpressionAttributeValues={ + ':Body': {'S': ''} + }) + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' + ) + + @requires_boto_gte("2.9") @mock_dynamodb2 def test_query_invalid_table(): @@ -247,6 +290,33 @@ def test_scan_returns_consumed_capacity(): assert response['ConsumedCapacity']['TableName'] == name +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_put_item_with_special_chars(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' }, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + '"': {"S": "foo"}, + } + ) + + @requires_boto_gte("2.9") @mock_dynamodb2 def test_query_returns_consumed_capacity(): @@ -382,6 +452,90 @@ def test_basic_projection_expressions(): assert 'body' in results['Items'][1] assert 'forum_name' in results['Items'][1] +@mock_dynamodb2 +def test_basic_projection_expressions_using_scan(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message' + }) + # Test a scan returning all items + results = table.scan( + FilterExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='body, subject' + ) + + assert 'body' in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'subject' in results['Items'][0] + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '1234', + 'body': 'yet another test message' + }) + + results = table.scan( + FilterExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='body' + ) + + assert 'body' in results['Items'][0] + assert 'subject' not in results['Items'][0] + assert 'forum_name' not in results['Items'][0] + assert 'body' in results['Items'][1] + assert 'subject' not in results['Items'][1] + assert 'forum_name' not in results['Items'][1] + + # The projection expression should not remove data from storage + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ) + assert 'subject' in results['Items'][0] + assert 'body' in results['Items'][1] + assert 'forum_name' in results['Items'][1] + @mock_dynamodb2 def test_basic_projection_expressions_with_attr_expression_names(): @@ -449,6 +603,84 @@ def test_basic_projection_expressions_with_attr_expression_names(): assert 'attachment' in results['Items'][0] assert results['Items'][0]['attachment'] == 'something' +@mock_dynamodb2 +def test_basic_projection_expressions_using_scan_with_attr_expression_names(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + 'attachment': 'something' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message', + 'attachment': 'something' + }) + # Test a scan returning all items + + results = table.scan( + FilterExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='#rl, #rt, subject', + ExpressionAttributeNames={ + '#rl': 'body', + '#rt': 'attachment' + }, + ) + + assert 'body' in results['Items'][0] + assert 'attachment' in results['Items'][0] + assert 'subject' in results['Items'][0] + assert 'forum_name' not in results['Items'][0] + + # Test without a FilterExpression + results = table.scan( + ProjectionExpression='#rl, #rt, subject', + ExpressionAttributeNames={ + '#rl': 'body', + '#rt': 'attachment' + }, + ) + + assert 'body' in results['Items'][0] + assert 'attachment' in results['Items'][0] + assert 'subject' in results['Items'][0] + assert 'forum_name' not in results['Items'][0] + @mock_dynamodb2 def test_put_item_returns_consumed_capacity(): @@ -606,44 +838,47 @@ def test_filter_expression(): filter_expr.expr(row1).should.be(True) # NOT test 2 - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT (Id = :v0)', {}, {':v0': {'N': 8}}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT (Id = :v0)', {}, {':v0': {'N': '8'}}) filter_expr.expr(row1).should.be(False) # Id = 8 so should be false # AND test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 7}}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': '5'}, ':v1': {'N': '7'}}) filter_expr.expr(row1).should.be(True) filter_expr.expr(row2).should.be(False) # OR test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 OR Id=:v1', {}, {':v0': {'N': 5}, ':v1': {'N': 8}}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 OR Id=:v1', {}, {':v0': {'N': '5'}, ':v1': {'N': '8'}}) filter_expr.expr(row1).should.be(True) # BETWEEN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN :v0 AND :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 10}}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN :v0 AND :v1', {}, {':v0': {'N': '5'}, ':v1': {'N': '10'}}) filter_expr.expr(row1).should.be(True) # PAREN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 AND (Subs = :v0 OR Subs = :v1)', {}, {':v0': {'N': 8}, ':v1': {'N': 5}}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 AND (Subs = :v0 OR Subs = :v1)', {}, {':v0': {'N': '8'}, ':v1': {'N': '5'}}) filter_expr.expr(row1).should.be(True) # IN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN (:v0, :v1, :v2)', {}, { + ':v0': {'N':
'7'}, + ':v1': {'N': '8'}, + ':v2': {'N': '9'}}) filter_expr.expr(row1).should.be(True) - # attribute function tests - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists(User)', {}, {}) + # attribute function tests (with extra spaces) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists (User)', {}, {}) filter_expr.expr(row1).should.be(True) - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, N)', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, :v0)', {}, {':v0': {'S': 'N'}}) filter_expr.expr(row1).should.be(True) # beginswith function test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('begins_with(Desc, Some)', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('begins_with(Desc, :v0)', {}, {':v0': {'S': 'Some'}}) filter_expr.expr(row1).should.be(True) filter_expr.expr(row2).should.be(False) # contains function test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('contains(KV, test1)', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('contains(KV, :v0)', {}, {':v0': {'S': 'test1'}}) filter_expr.expr(row1).should.be(True) filter_expr.expr(row2).should.be(False) @@ -658,6 +893,14 @@ def test_filter_expression(): {':v0': {'N': '7'}} ) filter_expr.expr(row1).should.be(True) + # Expression to check contains() on a string value + filter_expr = moto.dynamodb2.comparisons.get_filter_expression( + 'contains(#n0, :v0)', + {'#n0': 'Desc'}, + {':v0': {'S': 'Some'}} + ) + filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) @mock_dynamodb2 @@ -676,14 +919,26 @@ def test_query_filter(): TableName='test1', Item={ 'client': {'S': 'client1'}, - 'app': {'S': 'app1'} + 'app': {'S': 'app1'}, + 'nested': {'M': { + 'version': {'S': 'version1'}, + 'contents': {'L': [ + {'S': 'value1'}, {'S': 'value2'}, + ]}, + }}, } ) client.put_item( TableName='test1', Item={ 'client': {'S': 'client1'}, - 'app': {'S': 'app2'} + 'app': {'S': 'app2'}, + 'nested': {'M': { + 'version': {'S': 'version2'}, + 'contents': {'L': [ + {'S': 'value1'}, {'S': 'value2'}, + ]}, + }}, } ) @@ -699,6 +954,23 @@ def test_query_filter(): ) assert response['Count'] == 1 assert response['Items'][0]['app'] == 'app2' + response = table.query( + KeyConditionExpression=Key('client').eq('client1'), + FilterExpression=Attr('app').contains('app') + ) + assert response['Count'] == 2 + + response = table.query( + KeyConditionExpression=Key('client').eq('client1'), + FilterExpression=Attr('nested.version').contains('version') + ) + assert response['Count'] == 2 + + response = table.query( + KeyConditionExpression=Key('client').eq('client1'), + FilterExpression=Attr('nested.contents[0]').eq('value1') + ) + assert response['Count'] == 2 @mock_dynamodb2 @@ -732,6 +1004,16 @@ def test_scan_filter(): ) assert response['Count'] == 1 + response = table.scan( + FilterExpression=Attr('app').ne('app2') + ) + assert response['Count'] == 1 + + response = table.scan( + FilterExpression=Attr('app').ne('app1') + ) + assert response['Count'] == 0 + @mock_dynamodb2 def test_scan_filter2(): @@ -789,6 +1071,26 @@ def test_scan_filter3(): ) + assert response['Count'] == 1 + response = table.scan( + FilterExpression=Attr('active').ne(True) + ) + assert response['Count'] == 0 + + response = table.scan( + FilterExpression=Attr('active').ne(False) + ) + assert response['Count'] == 1
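+
+    # attribute_exists goes through the same filter machinery; a hedged sketch
+    # assuming the single seeded item carries the `active` attribute, as the
+    # assertions above imply:
+    response = table.scan(
+        FilterExpression=Attr('active').exists()
+    )
+    assert response['Count'] == 1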
+ + response = table.scan( + FilterExpression=Attr('app').ne(1) + ) + assert response['Count'] == 0 + + response = table.scan( + FilterExpression=Attr('app').ne(2) + ) + assert response['Count'] == 1 + @mock_dynamodb2 def test_scan_filter4(): @@ -836,6 +1138,33 @@ def test_bad_scan_filter(): raise RuntimeError('Should of raised ResourceInUseException') +@mock_dynamodb2 +def test_create_table_pay_per_request(): + client = boto3.client('dynamodb', region_name='us-east-1') + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + BillingMode="PAY_PER_REQUEST" + ) + + +@mock_dynamodb2 +def test_create_table_error_pay_per_request_with_provisioned_param(): + client = boto3.client('dynamodb', region_name='us-east-1') + + try: + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}, + BillingMode="PAY_PER_REQUEST" + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ValidationException') + + @mock_dynamodb2 def test_duplicate_create(): client = boto3.client('dynamodb', region_name='us-east-1') @@ -917,6 +1246,11 @@ def test_delete_item(): response = table.scan() assert response['Count'] == 2 + # Test ReturnValues validation + with assert_raises(ClientError) as ex: + table.delete_item(Key={'client': 'client1', 'app': 'app1'}, + ReturnValues='ALL_NEW') + # Test deletion and returning old value response = table.delete_item(Key={'client': 'client1', 'app': 'app1'}, ReturnValues='ALL_OLD') response['Attributes'].should.contain('client') @@ -1006,3 +1340,764 @@ def test_query_missing_expr_names(): resp['Count'].should.equal(1) resp['Items'][0]['client']['S'].should.equal('test2') + + +# https://github.com/spulec/moto/issues/1342 +@mock_dynamodb2 +def test_update_item_on_map(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': {'nested': {'data': 'test'}}, + }) + + resp = table.scan() + resp['Items'][0]['body'].should.equal({'nested': {'data': 'test'}}) + + # Nonexistent nested attributes are supported for existing top-level attributes. 
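+    # (The scan above confirms `body` is an existing top-level map, which is
+    # what lets the SET below create `nonexistentnested` beneath it.)
+    assert 'body' in resp['Items'][0]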
+ table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='SET body.#nested.#data = :tb, body.nested.#nonexistentnested.#data = :tb2', + ExpressionAttributeNames={ + '#nested': 'nested', + '#nonexistentnested': 'nonexistentnested', + '#data': 'data' + }, + ExpressionAttributeValues={ + ':tb': 'new_value', + ':tb2': 'other_value' + }) + + resp = table.scan() + resp['Items'][0]['body'].should.equal({ + 'nested': { + 'data': 'new_value', + 'nonexistentnested': {'data': 'other_value'} + } + }) + + # Test nested value for a nonexistent attribute. + with assert_raises(client.exceptions.ConditionalCheckFailedException): + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='SET nonexistent.#nested = :tb', + ExpressionAttributeNames={ + '#nested': 'nested' + }, + ExpressionAttributeValues={ + ':tb': 'new_value' + }) + + + +# https://github.com/spulec/moto/issues/1358 +@mock_dynamodb2 +def test_update_if_not_exists(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + # if_not_exists without space + UpdateExpression='SET created_at=if_not_exists(created_at,:created_at)', + ExpressionAttributeValues={ + ':created_at': 123 + } + ) + + resp = table.scan() + assert resp['Items'][0]['created_at'] == 123 + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + # if_not_exists with space + UpdateExpression='SET created_at = if_not_exists (created_at, :created_at)', + ExpressionAttributeValues={ + ':created_at': 456 + } + ) + + resp = table.scan() + # Still the original value + assert resp['Items'][0]['created_at'] == 123 + + +# https://github.com/spulec/moto/issues/1937 +@mock_dynamodb2 +def test_update_return_attributes(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='moto-test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1} + ) + + def update(col, to, rv): + return dynamodb.update_item( + TableName='moto-test', + Key={'id': {'S': 'foo'}}, + AttributeUpdates={col: {'Value': {'S': to}, 'Action': 'PUT'}}, + ReturnValues=rv + ) + + r = update('col1', 'val1', 'ALL_NEW') + assert r['Attributes'] == {'id': {'S': 'foo'}, 'col1': {'S': 'val1'}} + + r = update('col1', 'val2', 'ALL_OLD') + assert r['Attributes'] == {'id': {'S': 'foo'}, 'col1': {'S': 'val1'}} + + r = update('col2', 'val3', 'UPDATED_NEW') + assert r['Attributes'] == {'col2': {'S': 'val3'}} + + r = update('col2', 'val4', 'UPDATED_OLD') + assert r['Attributes'] == {'col2': {'S': 'val3'}} + + r = update('col1', 'val5', 'NONE') + assert r['Attributes'] == {} + + with assert_raises(ClientError) as ex: + r = update('col1', 'val6', 'WRONG') + + +@mock_dynamodb2 +def 
test_put_return_attributes(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='moto-test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1} + ) + + r = dynamodb.put_item( + TableName='moto-test', + Item={'id': {'S': 'foo'}, 'col1': {'S': 'val1'}}, + ReturnValues='NONE' + ) + assert 'Attributes' not in r + + r = dynamodb.put_item( + TableName='moto-test', + Item={'id': {'S': 'foo'}, 'col1': {'S': 'val2'}}, + ReturnValues='ALL_OLD' + ) + assert r['Attributes'] == {'id': {'S': 'foo'}, 'col1': {'S': 'val1'}} + + with assert_raises(ClientError) as ex: + dynamodb.put_item( + TableName='moto-test', + Item={'id': {'S': 'foo'}, 'col1': {'S': 'val3'}}, + ReturnValues='ALL_NEW' + ) + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal('Return values set to invalid value') + + +@mock_dynamodb2 +def test_query_global_secondary_index_when_created_via_update_table_resource(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'user_id', + 'KeyType': 'HASH' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'user_id', + 'AttributeType': 'N', + }, + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + }, + ) + table = dynamodb.Table('users') + table.update( + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + ], + GlobalSecondaryIndexUpdates=[ + {'Create': + { + 'IndexName': 'forum_name_index', + 'KeySchema': [ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH', + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + }, + } + } + ] + ) + + next_user_id = 1 + for my_forum_name in ['cats', 'dogs']: + for my_subject in ['my pet is the cutest', 'wow look at what my pet did', "don't you love my pet?"]: + table.put_item(Item={'user_id': next_user_id, 'forum_name': my_forum_name, 'subject': my_subject}) + next_user_id += 1 + + # get all the cat users + forum_only_query_response = table.query( + IndexName='forum_name_index', + Select='ALL_ATTRIBUTES', + KeyConditionExpression=Key('forum_name').eq('cats'), + ) + forum_only_items = forum_only_query_response['Items'] + assert len(forum_only_items) == 3 + for item in forum_only_items: + assert item['forum_name'] == 'cats' + + # query all cat users with a particular subject + forum_and_subject_query_results = table.query( + IndexName='forum_name_index', + Select='ALL_ATTRIBUTES', + KeyConditionExpression=Key('forum_name').eq('cats'), + FilterExpression=Attr('subject').eq('my pet is the cutest'), + ) + forum_and_subject_items = forum_and_subject_query_results['Items'] + assert len(forum_and_subject_items) == 1 + assert forum_and_subject_items[0] == {'user_id': Decimal('1'), 'forum_name': 'cats', + 'subject': 'my pet is the cutest'} + + +@mock_dynamodb2 +def test_dynamodb_streams_1(): + conn = boto3.client('dynamodb', region_name='us-east-1') + + resp = conn.create_table( + 
TableName='test-streams', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + StreamSpecification={ + 'StreamEnabled': True, + 'StreamViewType': 'NEW_AND_OLD_IMAGES' + } + ) + + assert 'StreamSpecification' in resp['TableDescription'] + assert resp['TableDescription']['StreamSpecification'] == { + 'StreamEnabled': True, + 'StreamViewType': 'NEW_AND_OLD_IMAGES' + } + assert 'LatestStreamLabel' in resp['TableDescription'] + assert 'LatestStreamArn' in resp['TableDescription'] + + resp = conn.delete_table(TableName='test-streams') + + assert 'StreamSpecification' in resp['TableDescription'] + + +@mock_dynamodb2 +def test_dynamodb_streams_2(): + conn = boto3.client('dynamodb', region_name='us-east-1') + + resp = conn.create_table( + TableName='test-stream-update', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + ) + + assert 'StreamSpecification' not in resp['TableDescription'] + + resp = conn.update_table( + TableName='test-stream-update', + StreamSpecification={ + 'StreamEnabled': True, + 'StreamViewType': 'NEW_IMAGE' + } + ) + + assert 'StreamSpecification' in resp['TableDescription'] + assert resp['TableDescription']['StreamSpecification'] == { + 'StreamEnabled': True, + 'StreamViewType': 'NEW_IMAGE' + } + assert 'LatestStreamLabel' in resp['TableDescription'] + assert 'LatestStreamArn' in resp['TableDescription'] + + +@mock_dynamodb2 +def test_condition_expressions(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + } + ) + + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='attribute_exists(#existing) AND attribute_not_exists(#nonexistent) AND #match = :match', + ExpressionAttributeNames={ + '#existing': 'existing', + '#nonexistent': 'nope', + '#match': 'match', + }, + ExpressionAttributeValues={ + ':match': {'S': 'match'} + } + ) + + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='NOT(attribute_exists(#nonexistent1) AND attribute_exists(#nonexistent2))', + ExpressionAttributeNames={ + '#nonexistent1': 'nope', + '#nonexistent2': 'nope2' + } + ) + + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='attribute_exists(#nonexistent) OR attribute_exists(#existing)', + ExpressionAttributeNames={ + '#nonexistent': 'nope', + '#existing': 'existing' + } + ) + + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='#client BETWEEN :a AND :z', + ExpressionAttributeNames={ + '#client': 'client', + }, + ExpressionAttributeValues={ + ':a': {'S': 'a'}, + ':z': {'S': 'z'}, + } + ) + + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='#client IN (:client1, :client2)', + ExpressionAttributeNames={ + '#client': 'client', + }, + ExpressionAttributeValues={ + ':client1': {'S': 'client1'}, + ':client2': {'S': 'client2'}, + } + ) + + with assert_raises(client.exceptions.ConditionalCheckFailedException): + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='attribute_exists(#nonexistent1) AND attribute_exists(#nonexistent2)', + ExpressionAttributeNames={ + '#nonexistent1': 'nope', + '#nonexistent2': 'nope2' + } + ) + + with assert_raises(client.exceptions.ConditionalCheckFailedException): + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + ConditionExpression='NOT(attribute_not_exists(#nonexistent1) AND attribute_not_exists(#nonexistent2))', + ExpressionAttributeNames={ + '#nonexistent1': 'nope', + '#nonexistent2': 'nope2' + } + ) + + with assert_raises(client.exceptions.ConditionalCheckFailedException): + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + 'match': {'S': 'match'}, + 'existing': {'S': 'existing'}, + }, + 
ConditionExpression='attribute_exists(#existing) AND attribute_not_exists(#nonexistent) AND #match = :match', + ExpressionAttributeNames={ + '#existing': 'existing', + '#nonexistent': 'nope', + '#match': 'match', + }, + ExpressionAttributeValues={ + ':match': {'S': 'match2'} + } + ) + + # Make sure update_item honors ConditionExpression as well + client.update_item( + TableName='test1', + Key={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'}, + }, + UpdateExpression='set #match=:match', + ConditionExpression='attribute_exists(#existing)', + ExpressionAttributeNames={ + '#existing': 'existing', + '#match': 'match', + }, + ExpressionAttributeValues={ + ':match': {'S': 'match'} + } + ) + + with assert_raises(client.exceptions.ConditionalCheckFailedException): + client.update_item( + TableName='test1', + Key={ + 'client': { 'S': 'client1'}, + 'app': { 'S': 'app1'}, + }, + UpdateExpression='set #match=:match', + ConditionExpression='attribute_not_exists(#existing)', + ExpressionAttributeValues={ + ':match': {'S': 'match'} + }, + ExpressionAttributeNames={ + '#existing': 'existing', + '#match': 'match', + }, + ) + + +@mock_dynamodb2 +def test_condition_expression__attr_doesnt_exist(): + client = boto3.client('dynamodb', region_name='us-east-1') + + client.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'forum_name', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'forum_name', 'AttributeType': 'S'}, + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + ) + + client.put_item( + TableName='test', + Item={ + 'forum_name': {'S': 'foo'}, + 'ttl': {'N': 'bar'}, + } + ) + + + def update_if_attr_doesnt_exist(): + # Test nonexistent top-level attribute. + client.update_item( + TableName='test', + Key={ + 'forum_name': {'S': 'the-key'}, + 'subject': {'S': 'the-subject'}, + }, + UpdateExpression='set #new_state=:new_state, #ttl=:ttl', + ConditionExpression='attribute_not_exists(#new_state)', + ExpressionAttributeNames={'#new_state': 'foobar', '#ttl': 'ttl'}, + ExpressionAttributeValues={ + ':new_state': {'S': 'some-value'}, + ':ttl': {'N': '12345.67'}, + }, + ReturnValues='ALL_NEW', + ) + + update_if_attr_doesnt_exist() + + # Second time should fail + with assert_raises(client.exceptions.ConditionalCheckFailedException): + update_if_attr_doesnt_exist() + + +@mock_dynamodb2 +def test_condition_expression__or_order(): + client = boto3.client('dynamodb', region_name='us-east-1') + + client.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'forum_name', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'forum_name', 'AttributeType': 'S'}, + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + ) + + # Ensure that the RHS of the OR expression is not evaluated if the LHS + # returns true (as it would result in an error) + client.update_item( + TableName='test', + Key={ + 'forum_name': {'S': 'the-key'}, + }, + UpdateExpression='set #ttl=:ttl', + ConditionExpression='attribute_not_exists(#ttl) OR #ttl <= :old_ttl', + ExpressionAttributeNames={'#ttl': 'ttl'}, + ExpressionAttributeValues={ + ':ttl': {'N': '6'}, + ':old_ttl': {'N': '5'}, + } + ) + + +@mock_dynamodb2 +def test_query_gsi_with_range_key(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + dynamodb.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_hash_key', 'AttributeType': 'S'},
+ {'AttributeName': 'gsi_range_key', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_hash_key', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'gsi_range_key', + 'KeyType': 'RANGE' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': 'test1'}, + 'gsi_hash_key': {'S': 'key1'}, + 'gsi_range_key': {'S': 'range1'}, + } + ) + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': 'test2'}, + 'gsi_hash_key': {'S': 'key1'}, + } + ) + + res = dynamodb.query(TableName='test', IndexName='test_gsi', + KeyConditionExpression='gsi_hash_key = :gsi_hash_key AND gsi_range_key = :gsi_range_key', + ExpressionAttributeValues={ + ':gsi_hash_key': {'S': 'key1'}, + ':gsi_range_key': {'S': 'range1'} + }) + res.should.have.key("Count").equal(1) + res.should.have.key("Items") + res['Items'][0].should.equal({ + 'id': {'S': 'test1'}, + 'gsi_hash_key': {'S': 'key1'}, + 'gsi_range_key': {'S': 'range1'}, + }) + + +@mock_dynamodb2 +def test_scan_by_non_exists_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_col', + 'KeyType': 'HASH' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ] + ) + + with assert_raises(ClientError) as ex: + dynamodb.scan(TableName='test', IndexName='non_exists_index') + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'The table does not have the specified index: non_exists_index' + ) diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index a9ab298b7..e64d7d196 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1344,6 +1344,34 @@ def test_update_item_add_value_string_set(): 'subject': '123', }) +@mock_dynamodb2 +def test_update_item_delete_value_string_set(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'string_set': set(['str1', 'str2']), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'string_set': { + 'Action': u'DELETE', + 'Value': set(['str2']), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'string_set': set(['str1']), + 'forum_name': 'the-key', + 'subject': '123', + }) @mock_dynamodb2 def test_update_item_add_value_does_not_exist_is_created(): @@ -1961,3 +1989,113 @@ def test_query_pagination(): results = 
page1['Items'] + page2['Items'] subjects = set([int(r['subject']) for r in results]) subjects.should.equal(set(range(10))) + + +@mock_dynamodb2 +def test_scan_by_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[ + {'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'range_key', 'KeyType': 'RANGE'}, + ], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'range_key', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_range_key', 'AttributeType': 'S'}, + {'AttributeName': 'lsi_range_key', 'AttributeType': 'S'}, + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + {'AttributeName': 'gsi_col', 'KeyType': 'HASH'}, + {'AttributeName': 'gsi_range_key', 'KeyType': 'RANGE'}, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ], + LocalSecondaryIndexes=[ + { + 'IndexName': 'test_lsi', + 'KeySchema': [ + {'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'lsi_range_key', 'KeyType': 'RANGE'}, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'range_key': {'S': '1'}, + 'col1': {'S': 'val1'}, + 'gsi_col': {'S': '1'}, + 'gsi_range_key': {'S': '1'}, + 'lsi_range_key': {'S': '1'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'range_key': {'S': '2'}, + 'col1': {'S': 'val2'}, + 'gsi_col': {'S': '1'}, + 'gsi_range_key': {'S': '2'}, + 'lsi_range_key': {'S': '2'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '3'}, + 'range_key': {'S': '1'}, + 'col1': {'S': 'val3'}, + } + ) + + res = dynamodb.scan(TableName='test') + assert res['Count'] == 3 + assert len(res['Items']) == 3 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['gsi_col']['S'] == '1' + assert last_eval_key['gsi_range_key']['S'] == '1' + + res = dynamodb.scan(TableName='test', IndexName='test_lsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_lsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + assert last_eval_key['range_key']['S'] == '1' + assert last_eval_key['lsi_range_key']['S'] == '1' diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 5e635d5ef..1880c7cab 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -596,7 +596,50 @@ def test_boto3_conditions(): @mock_dynamodb2 -def test_boto3_put_item_conditions_fails(): +def test_boto3_put_item_conditions_pass(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'foo': { + 
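+                # 'Expected' is the legacy boto2-style conditional: EQ means the put only
+                # succeeds if the current value of 'foo' equals the value listed below.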
'ComparisonOperator': 'EQ', + 'AttributeValueList': ['bar'] + } + }) + final_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(final_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_put_item_conditions_pass_because_expect_not_exists_by_compare_to_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'whatever': { + 'ComparisonOperator': 'NULL', + } + }) + final_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(final_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_put_item_conditions_pass_because_expect_exists_by_compare_to_not_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'foo': { + 'ComparisonOperator': 'NOT_NULL', + } + }) + final_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(final_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_put_item_conditions_fail(): table = _create_user_table() table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) table.put_item.when.called_with( @@ -609,7 +652,7 @@ def test_boto3_put_item_conditions_fails(): }).should.throw(botocore.client.ClientError) @mock_dynamodb2 -def test_boto3_update_item_conditions_fails(): +def test_boto3_update_item_conditions_fail(): table = _create_user_table() table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) table.update_item.when.called_with( @@ -622,7 +665,7 @@ def test_boto3_update_item_conditions_fails(): }).should.throw(botocore.client.ClientError) @mock_dynamodb2 -def test_boto3_update_item_conditions_fails_because_expect_not_exists(): +def test_boto3_update_item_conditions_fail_because_expect_not_exists(): table = _create_user_table() table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) table.update_item.when.called_with( @@ -634,6 +677,19 @@ def test_boto3_update_item_conditions_fails_because_expect_not_exists(): } }).should.throw(botocore.client.ClientError) +@mock_dynamodb2 +def test_boto3_update_item_conditions_fail_because_expect_not_exists_by_compare_to_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) + table.update_item.when.called_with( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=bar', + Expected={ + 'foo': { + 'ComparisonOperator': 'NULL', + } + }).should.throw(botocore.client.ClientError) + @mock_dynamodb2 def test_boto3_update_item_conditions_pass(): table = _create_user_table() @@ -650,7 +706,7 @@ def test_boto3_update_item_conditions_pass(): assert dict(returned_item)['Item']['foo'].should.equal("baz") @mock_dynamodb2 -def test_boto3_update_item_conditions_pass_because_expext_not_exists(): +def test_boto3_update_item_conditions_pass_because_expect_not_exists(): table = _create_user_table() table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) table.update_item( @@ -664,6 +720,77 @@ def test_boto3_update_item_conditions_pass_because_expext_not_exists(): returned_item = table.get_item(Key={'username': 'johndoe'}) assert dict(returned_item)['Item']['foo'].should.equal("baz") +@mock_dynamodb2 +def test_boto3_update_item_conditions_pass_because_expect_not_exists_by_compare_to_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.update_item( + Key={'username': 'johndoe'}, + 
UpdateExpression='SET foo=baz', + Expected={ + 'whatever': { + 'ComparisonOperator': 'NULL', + } + }) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_update_item_conditions_pass_because_expect_exists_by_compare_to_not_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=baz', + Expected={ + 'foo': { + 'ComparisonOperator': 'NOT_NULL', + } + }) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal("baz") + + +@mock_dynamodb2 +def test_boto3_update_settype_item_with_conditions(): + class OrderedSet(set): + """A set with predictable iteration order""" + def __init__(self, values): + super(OrderedSet, self).__init__(values) + self.__ordered_values = values + + def __iter__(self): + return iter(self.__ordered_values) + + table = _create_user_table() + table.put_item(Item={'username': 'johndoe'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=:new_value', + ExpressionAttributeValues={ + ':new_value': OrderedSet(['hello', 'world']), + }, + ) + + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=:new_value', + ExpressionAttributeValues={ + ':new_value': set(['baz']), + }, + Expected={ + 'foo': { + 'ComparisonOperator': 'EQ', + 'AttributeValueList': [ + OrderedSet(['world', 'hello']), # Opposite order to original + ], + } + }, + ) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal(set(['baz'])) + + @mock_dynamodb2 def test_boto3_put_item_conditions_pass(): table = _create_user_table() @@ -702,3 +829,77 @@ def test_scan_pagination(): results = page1['Items'] + page2['Items'] usernames = set([r['username'] for r in results]) usernames.should.equal(set(expected_usernames)) + + +@mock_dynamodb2 +def test_scan_by_index(): + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + + dynamodb.create_table( + TableName='test', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[ + {'AttributeName': 'id', 'AttributeType': 'S'}, + {'AttributeName': 'gsi_col', 'AttributeType': 'S'} + ], + ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1}, + GlobalSecondaryIndexes=[ + { + 'IndexName': 'test_gsi', + 'KeySchema': [ + { + 'AttributeName': 'gsi_col', + 'KeyType': 'HASH' + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + }, + ] + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '1'}, + 'col1': {'S': 'val1'}, + 'gsi_col': {'S': 'gsi_val1'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '2'}, + 'col1': {'S': 'val2'}, + 'gsi_col': {'S': 'gsi_val2'}, + } + ) + + dynamodb.put_item( + TableName='test', + Item={ + 'id': {'S': '3'}, + 'col1': {'S': 'val3'}, + } + ) + + res = dynamodb.scan(TableName='test') + assert res['Count'] == 3 + assert len(res['Items']) == 3 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi') + assert res['Count'] == 2 + assert len(res['Items']) == 2 + + res = dynamodb.scan(TableName='test', IndexName='test_gsi', Limit=1) + assert res['Count'] == 1 + assert len(res['Items']) == 1 + last_eval_key = res['LastEvaluatedKey'] + assert last_eval_key['id']['S'] == '1' + 
assert last_eval_key['gsi_col']['S'] == 'gsi_val1' diff --git a/tests/test_dynamodbstreams/test_dynamodbstreams.py b/tests/test_dynamodbstreams/test_dynamodbstreams.py new file mode 100644 index 000000000..b60c21053 --- /dev/null +++ b/tests/test_dynamodbstreams/test_dynamodbstreams.py @@ -0,0 +1,234 @@ +from __future__ import unicode_literals, print_function + +from nose.tools import assert_raises + +import boto3 +from moto import mock_dynamodb2, mock_dynamodbstreams + + +class TestCore(): + stream_arn = None + mocks = [] + + def setup(self): + self.mocks = [mock_dynamodb2(), mock_dynamodbstreams()] + for m in self.mocks: + m.start() + + # create a table with a stream + conn = boto3.client('dynamodb', region_name='us-east-1') + + resp = conn.create_table( + TableName='test-streams', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', + 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1}, + StreamSpecification={ + 'StreamEnabled': True, + 'StreamViewType': 'NEW_AND_OLD_IMAGES' + } + ) + self.stream_arn = resp['TableDescription']['LatestStreamArn'] + + def teardown(self): + conn = boto3.client('dynamodb', region_name='us-east-1') + conn.delete_table(TableName='test-streams') + self.stream_arn = None + + for m in self.mocks: + m.stop() + + + def test_verify_stream(self): + conn = boto3.client('dynamodb', region_name='us-east-1') + resp = conn.describe_table(TableName='test-streams') + assert 'LatestStreamArn' in resp['Table'] + + def test_describe_stream(self): + conn = boto3.client('dynamodbstreams', region_name='us-east-1') + + resp = conn.describe_stream(StreamArn=self.stream_arn) + assert 'StreamDescription' in resp + desc = resp['StreamDescription'] + assert desc['StreamArn'] == self.stream_arn + assert desc['TableName'] == 'test-streams' + + def test_list_streams(self): + conn = boto3.client('dynamodbstreams', region_name='us-east-1') + + resp = conn.list_streams() + assert resp['Streams'][0]['StreamArn'] == self.stream_arn + + resp = conn.list_streams(TableName='no-stream') + assert not resp['Streams'] + + def test_get_shard_iterator(self): + conn = boto3.client('dynamodbstreams', region_name='us-east-1') + + resp = conn.describe_stream(StreamArn=self.stream_arn) + shard_id = resp['StreamDescription']['Shards'][0]['ShardId'] + + resp = conn.get_shard_iterator( + StreamArn=self.stream_arn, + ShardId=shard_id, + ShardIteratorType='TRIM_HORIZON' + ) + assert 'ShardIterator' in resp + + def test_get_records_empty(self): + conn = boto3.client('dynamodbstreams', region_name='us-east-1') + + resp = conn.describe_stream(StreamArn=self.stream_arn) + shard_id = resp['StreamDescription']['Shards'][0]['ShardId'] + + resp = conn.get_shard_iterator( + StreamArn=self.stream_arn, + ShardId=shard_id, + ShardIteratorType='LATEST' + ) + iterator_id = resp['ShardIterator'] + + resp = conn.get_records(ShardIterator=iterator_id) + assert 'Records' in resp + assert len(resp['Records']) == 0 + + def test_get_records_seq(self): + conn = boto3.client('dynamodb', region_name='us-east-1') + + conn.put_item( + TableName='test-streams', + Item={ + 'id': {'S': 'entry1'}, + 'first_col': {'S': 'foo'} + } + ) + conn.put_item( + TableName='test-streams', + Item={ + 'id': {'S': 'entry1'}, + 'first_col': {'S': 'bar'}, + 'second_col': {'S': 'baz'} + } + ) + conn.delete_item( + TableName='test-streams', + Key={'id': {'S': 'entry1'}} + ) + + conn = boto3.client('dynamodbstreams', region_name='us-east-1') + + resp = 
conn.describe_stream(StreamArn=self.stream_arn) + shard_id = resp['StreamDescription']['Shards'][0]['ShardId'] + + resp = conn.get_shard_iterator( + StreamArn=self.stream_arn, + ShardId=shard_id, + ShardIteratorType='TRIM_HORIZON' + ) + iterator_id = resp['ShardIterator'] + + resp = conn.get_records(ShardIterator=iterator_id) + assert len(resp['Records']) == 3 + assert resp['Records'][0]['eventName'] == 'INSERT' + assert resp['Records'][1]['eventName'] == 'MODIFY' + assert resp['Records'][2]['eventName'] == 'DELETE' + + # now try fetching from the next shard iterator, it should be + # empty + resp = conn.get_records(ShardIterator=resp['NextShardIterator']) + assert len(resp['Records']) == 0 + + +class TestEdges(): + mocks = [] + + def setup(self): + self.mocks = [mock_dynamodb2(), mock_dynamodbstreams()] + for m in self.mocks: + m.start() + + def teardown(self): + for m in self.mocks: + m.stop() + + + def test_enable_stream_on_table(self): + conn = boto3.client('dynamodb', region_name='us-east-1') + resp = conn.create_table( + TableName='test-streams', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}], + AttributeDefinitions=[{'AttributeName': 'id', + 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1} + ) + assert 'StreamSpecification' not in resp['TableDescription'] + + resp = conn.update_table( + TableName='test-streams', + StreamSpecification={ + 'StreamViewType': 'KEYS_ONLY' + } + ) + assert 'StreamSpecification' in resp['TableDescription'] + assert resp['TableDescription']['StreamSpecification'] == { + 'StreamEnabled': True, + 'StreamViewType': 'KEYS_ONLY' + } + assert 'LatestStreamLabel' in resp['TableDescription'] + + # now try to enable it again + with assert_raises(conn.exceptions.ResourceInUseException): + resp = conn.update_table( + TableName='test-streams', + StreamSpecification={ + 'StreamViewType': 'OLD_IMAGES' + } + ) + + def test_stream_with_range_key(self): + dyn = boto3.client('dynamodb', region_name='us-east-1') + + resp = dyn.create_table( + TableName='test-streams', + KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}, + {'AttributeName': 'color', 'KeyType': 'RANGE'}], + AttributeDefinitions=[{'AttributeName': 'id', + 'AttributeType': 'S'}, + {'AttributeName': 'color', + 'AttributeType': 'S'}], + ProvisionedThroughput={'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1}, + StreamSpecification={ + 'StreamViewType': 'NEW_IMAGES' + } + ) + stream_arn = resp['TableDescription']['LatestStreamArn'] + + streams = boto3.client('dynamodbstreams', region_name='us-east-1') + resp = streams.describe_stream(StreamArn=stream_arn) + shard_id = resp['StreamDescription']['Shards'][0]['ShardId'] + + resp = streams.get_shard_iterator( + StreamArn=stream_arn, + ShardId=shard_id, + ShardIteratorType='LATEST' + ) + iterator_id = resp['ShardIterator'] + + dyn.put_item( + TableName='test-streams', + Item={'id': {'S': 'row1'}, 'color': {'S': 'blue'}} + ) + dyn.put_item( + TableName='test-streams', + Item={'id': {'S': 'row2'}, 'color': {'S': 'green'}} + ) + + resp = streams.get_records(ShardIterator=iterator_id) + assert len(resp['Records']) == 2 + assert resp['Records'][0]['eventName'] == 'INSERT' + assert resp['Records'][1]['eventName'] == 'INSERT' + diff --git a/moto/packages/responses/__init__.py b/tests/test_ec2/__init__.py similarity index 100% rename from moto/packages/responses/__init__.py rename to tests/test_ec2/__init__.py diff --git a/tests/test_ec2/helpers.py b/tests/test_ec2/helpers.py new file mode 100644 index 
000000000..94c9c10cb --- /dev/null +++ b/tests/test_ec2/helpers.py @@ -0,0 +1,15 @@ +import six + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import rsa + + +def rsa_check_private_key(private_key_material): + assert isinstance(private_key_material, six.string_types) + + private_key = serialization.load_pem_private_key( + data=private_key_material.encode('ascii'), + backend=default_backend(), + password=None) + assert isinstance(private_key, rsa.RSAPrivateKey) diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py old mode 100755 new mode 100644 index 1029ba39e..fd7234511 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -1,23 +1,27 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises import boto -import boto3 import boto.ec2 import boto3 -from boto.exception import EC2ResponseError, EC2ResponseError - +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +# Ensure 'assert_raises' context manager support for Python 2.6 +from nose.tools import assert_raises import sure # noqa from moto import mock_ec2_deprecated, mock_ec2 +from moto.ec2.models import AMIS from tests.helpers import requires_boto_gte @mock_ec2_deprecated def test_ami_create_and_delete(): conn = boto.connect_ec2('the_key', 'the_secret') + + initial_ami_count = len(AMIS) + conn.get_all_volumes().should.have.length_of(0) + conn.get_all_snapshots().should.have.length_of(initial_ami_count) + reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] @@ -32,33 +36,34 @@ def test_ami_create_and_delete(): image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") all_images = conn.get_all_images() - image = all_images[0] + set([i.id for i in all_images]).should.contain(image_id) - image.id.should.equal(image_id) - image.virtualization_type.should.equal(instance.virtualization_type) - image.architecture.should.equal(instance.architecture) - image.kernel_id.should.equal(instance.kernel) - image.platform.should.equal(instance.platform) - image.creationDate.should_not.be.none + retrieved_image = [i for i in all_images if i.id == image_id][0] + + retrieved_image.id.should.equal(image_id) + retrieved_image.virtualization_type.should.equal(instance.virtualization_type) + retrieved_image.architecture.should.equal(instance.architecture) + retrieved_image.kernel_id.should.equal(instance.kernel) + retrieved_image.platform.should.equal(instance.platform) + retrieved_image.creationDate.should_not.be.none instance.terminate() - # Validate auto-created volume and snapshot + # Ensure we're no longer creating a volume volumes = conn.get_all_volumes() - volumes.should.have.length_of(1) - volume = volumes[0] + volumes.should.have.length_of(0) + # Validate auto-created snapshot snapshots = conn.get_all_snapshots() - snapshots.should.have.length_of(1) - snapshot = snapshots[0] + snapshots.should.have.length_of(initial_ami_count + 1) - image.block_device_mapping.current_value.snapshot_id.should.equal( - snapshot.id) + retrieved_image_snapshot_id = retrieved_image.block_device_mapping.current_value.snapshot_id + [s.id for s in snapshots].should.contain(retrieved_image_snapshot_id) + snapshot = [s for s in snapshots if s.id == retrieved_image_snapshot_id][0] snapshot.description.should.equal( 
- "Auto-created snapshot for AMI {0}".format(image.id)) - snapshot.volume_id.should.equal(volume.id) + "Auto-created snapshot for AMI {0}".format(retrieved_image.id)) # root device should be in AMI's block device mappings - root_mapping = image.block_device_mapping.get(image.root_device_name) + root_mapping = retrieved_image.block_device_mapping.get(retrieved_image.root_device_name) root_mapping.should_not.be.none # Deregister @@ -83,6 +88,11 @@ def test_ami_create_and_delete(): @mock_ec2_deprecated def test_ami_copy(): conn = boto.ec2.connect_to_region("us-west-1") + + initial_ami_count = len(AMIS) + conn.get_all_volumes().should.have.length_of(0) + conn.get_all_snapshots().should.have.length_of(initial_ami_count) + reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] @@ -95,7 +105,8 @@ def test_ami_copy(): # the image_id to fetch the full info. with assert_raises(EC2ResponseError) as ex: copy_image_ref = conn.copy_image( - source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", dry_run=True) + source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", + dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal( @@ -113,9 +124,11 @@ def test_ami_copy(): copy_image.kernel_id.should.equal(source_image.kernel_id) copy_image.platform.should.equal(source_image.platform) - # Validate auto-created volume and snapshot - conn.get_all_volumes().should.have.length_of(2) - conn.get_all_snapshots().should.have.length_of(2) + # Ensure we're no longer creating a volume + conn.get_all_volumes().should.have.length_of(0) + + # Validate auto-created snapshot + conn.get_all_snapshots().should.have.length_of(initial_ami_count + 2) copy_image.block_device_mapping.current_value.snapshot_id.should_not.equal( source_image.block_device_mapping.current_value.snapshot_id) @@ -217,7 +230,8 @@ def test_ami_filters(): amis_by_architecture = conn.get_all_images( filters={'architecture': 'x86_64'}) - set([ami.id for ami in amis_by_architecture]).should.equal(set([imageB.id])) + set([ami.id for ami in amis_by_architecture]).should.contain(imageB.id) + len(amis_by_architecture).should.equal(35) amis_by_kernel = conn.get_all_images(filters={'kernel-id': 'k-abcd1234'}) set([ami.id for ami in amis_by_kernel]).should.equal(set([imageB.id])) @@ -225,26 +239,32 @@ def test_ami_filters(): amis_by_virtualization = conn.get_all_images( filters={'virtualization-type': 'paravirtual'}) set([ami.id for ami in amis_by_virtualization] - ).should.equal(set([imageB.id])) + ).should.contain(imageB.id) + len(amis_by_virtualization).should.equal(3) amis_by_platform = conn.get_all_images(filters={'platform': 'windows'}) - set([ami.id for ami in amis_by_platform]).should.equal(set([imageA.id])) + set([ami.id for ami in amis_by_platform]).should.contain(imageA.id) + len(amis_by_platform).should.equal(24) amis_by_id = conn.get_all_images(filters={'image-id': imageA.id}) set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id])) amis_by_state = conn.get_all_images(filters={'state': 'available'}) - set([ami.id for ami in amis_by_state]).should.equal( - set([imageA.id, imageB.id])) + ami_ids_by_state = [ami.id for ami in amis_by_state] + ami_ids_by_state.should.contain(imageA.id) + ami_ids_by_state.should.contain(imageB.id) + len(amis_by_state).should.equal(36) amis_by_name = conn.get_all_images(filters={'name': imageA.name}) set([ami.id for ami in 
amis_by_name]).should.equal(set([imageA.id]))
 
-    amis_by_public = conn.get_all_images(filters={'is-public': True})
-    set([ami.id for ami in amis_by_public]).should.equal(set([imageB.id]))
+    amis_by_public = conn.get_all_images(filters={'is-public': 'true'})
+    set([ami.id for ami in amis_by_public]).should.contain(imageB.id)
+    len(amis_by_public).should.equal(35)
 
-    amis_by_nonpublic = conn.get_all_images(filters={'is-public': False})
-    set([ami.id for ami in amis_by_nonpublic]).should.equal(set([imageA.id]))
+    amis_by_nonpublic = conn.get_all_images(filters={'is-public': 'false'})
+    set([ami.id for ami in amis_by_nonpublic]).should.contain(imageA.id)
+    len(amis_by_nonpublic).should.equal(1)
 
 
 @mock_ec2_deprecated
@@ -427,18 +447,17 @@ def test_ami_attribute_user_permissions():
         **REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError)
 
 
-@mock_ec2_deprecated
+@mock_ec2
 def test_ami_describe_executable_users():
     conn = boto3.client('ec2', region_name='us-east-1')
     ec2 = boto3.resource('ec2', 'us-east-1')
     ec2.create_instances(ImageId='', MinCount=1, MaxCount=1)
 
-    response = conn.describe_instances(Filters=[{'Name': 'instance-state-name','Values': ['running']}])
+    response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
     instance_id = response['Reservations'][0]['Instances'][0]['InstanceId']
     image_id = conn.create_image(InstanceId=instance_id,
-                                 Name='TestImage',)['ImageId']
-
+                                 Name='TestImage')['ImageId']
 
     USER1 = '123456789011'
 
@@ -460,19 +479,18 @@ def test_ami_describe_executable_users():
     images[0]['ImageId'].should.equal(image_id)
 
 
-@mock_ec2_deprecated
+@mock_ec2
 def test_ami_describe_executable_users_negative():
     conn = boto3.client('ec2', region_name='us-east-1')
     ec2 = boto3.resource('ec2', 'us-east-1')
     ec2.create_instances(ImageId='', MinCount=1, MaxCount=1)
 
-    response = conn.describe_instances(Filters=[{'Name': 'instance-state-name','Values': ['running']}])
+    response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
     instance_id = response['Reservations'][0]['Instances'][0]['InstanceId']
     image_id = conn.create_image(InstanceId=instance_id,
                                  Name='TestImage')['ImageId']
-
     USER1 = '123456789011'
     USER2 = '113355789012'
 
@@ -481,6 +499,7 @@ def test_ami_describe_executable_users_negative():
                      'OperationType': 'add',
                      'UserIds': [USER1]}
 
+    # Grant launch permission to USER1 only.
     # Add users and get no images
     conn.modify_image_attribute(**ADD_USER_ARGS)
 
@@ -493,18 +512,17 @@ def test_ami_describe_executable_users_negative():
     images.should.have.length_of(0)
 
 
-@mock_ec2_deprecated
+@mock_ec2
 def test_ami_describe_executable_users_and_filter():
     conn = boto3.client('ec2', region_name='us-east-1')
     ec2 = boto3.resource('ec2', 'us-east-1')
     ec2.create_instances(ImageId='', MinCount=1, MaxCount=1)
 
-    response = conn.describe_instances(Filters=[{'Name': 'instance-state-name','Values': ['running']}])
+    response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
     instance_id = response['Reservations'][0]['Instances'][0]['InstanceId']
     image_id = conn.create_image(InstanceId=instance_id,
-                                 Name='ImageToDelete',)['ImageId']
-
+                                 Name='ImageToDelete')['ImageId']
 
     USER1 = '123456789011'
 
@@ -667,12 +685,34 @@ def test_ami_attribute_error_cases():
 
 
 @mock_ec2
-def test_ami_filter_wildcard():
+def test_ami_describe_non_existent():
     ec2 = boto3.resource('ec2', region_name='us-west-1')
-    instance = ec2.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0]
-    image = instance.create_image(Name='test-image')
-    filter_result = list(ec2.images.filter(Owners=['111122223333'], Filters=[{'Name':'name', 'Values':['test*']}]))
-    assert filter_result == [image]
+    # Valid pattern but non-existent id
+    img = ec2.Image('ami-abcd1234')
+    with assert_raises(ClientError):
+        img.load()
+    # Invalid ami pattern
+    img = ec2.Image('not_an_ami_id')
+    with assert_raises(ClientError):
+        img.load()
+
+
+@mock_ec2
+def test_ami_filter_wildcard():
+    ec2_resource = boto3.resource('ec2', region_name='us-west-1')
+    ec2_client = boto3.client('ec2', region_name='us-west-1')
+
+    instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0]
+    instance.create_image(Name='test-image')
+
+    # Create an image with the same owner that will not match the filter
+    instance.create_image(Name='not-matching-image')
+
+    my_images = ec2_client.describe_images(
+        Owners=['111122223333'],
+        Filters=[{'Name': 'name', 'Values': ['test*']}]
+    )['Images']
+    my_images.should.have.length_of(1)
 
 
 @mock_ec2
@@ -691,3 +731,46 @@ def test_ami_filter_by_owner_id():
     assert all(ubuntu_ids) and ubuntu_ids[0] == ubuntu_id
     # Check we actually have a subset of images
     assert len(ubuntu_ids) < len(all_ids)
+
+
+@mock_ec2
+def test_ami_filter_by_self():
+    ec2_resource = boto3.resource('ec2', region_name='us-west-1')
+    ec2_client = boto3.client('ec2', region_name='us-west-1')
+
+    my_images = ec2_client.describe_images(Owners=['self'])['Images']
+    my_images.should.have.length_of(0)
+
+    # Create a new image
+    instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0]
+    instance.create_image(Name='test-image')
+
+    my_images = ec2_client.describe_images(Owners=['self'])['Images']
+    my_images.should.have.length_of(1)
+
+
+@mock_ec2
+def test_ami_snapshots_have_correct_owner():
+    ec2_client = boto3.client('ec2', region_name='us-west-1')
+
+    images_response = ec2_client.describe_images()
+
+    owner_id_to_snapshot_ids = {}
+    for image in images_response['Images']:
+        owner_id = image['OwnerId']
+        snapshot_ids = [
+            block_device_mapping['Ebs']['SnapshotId']
+            for block_device_mapping in image['BlockDeviceMappings']
+        ]
+        existing_snapshot_ids = owner_id_to_snapshot_ids.get(owner_id, [])
+        owner_id_to_snapshot_ids[owner_id] = (
+            existing_snapshot_ids + snapshot_ids
+        )
+
+    for owner_id in owner_id_to_snapshot_ids:
+        snapshots_response = ec2_client.describe_snapshots(
+            SnapshotIds=owner_id_to_snapshot_ids[owner_id]
+        )
+
+        for snapshot in snapshots_response['Snapshots']:
+            assert owner_id == snapshot['OwnerId']
diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py
index 7226cacaf..c64f075ca 100644
--- a/tests/test_ec2/test_availability_zones_and_regions.py
+++ b/tests/test_ec2/test_availability_zones_and_regions.py
@@ -36,6 +36,11 @@ def test_boto3_describe_regions():
     for rec in resp['Regions']:
         rec['Endpoint'].should.contain(rec['RegionName'])
 
+    test_region = 'us-east-1'
+    resp = ec2.describe_regions(RegionNames=[test_region])
+    resp['Regions'].should.have.length_of(1)
+    resp['Regions'][0].should.have.key('RegionName').which.should.equal(test_region)
+
 
 @mock_ec2
 def test_boto3_availability_zones():
diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py
index 9c07f38d6..ab5b31ba0 100644
--- a/tests/test_ec2/test_elastic_block_store.py
+++ b/tests/test_ec2/test_elastic_block_store.py
@@ -5,15 +5,18 @@ from nose.tools import assert_raises
 
 from moto.ec2 import
ec2_backends import boto +import boto3 +from botocore.exceptions import ClientError from boto.exception import EC2ResponseError +from freezegun import freeze_time import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 @mock_ec2_deprecated def test_create_and_delete_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") all_volumes = conn.get_all_volumes() @@ -49,7 +52,7 @@ def test_create_and_delete_volume(): @mock_ec2_deprecated def test_create_encrypted_volume_dryrun(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") with assert_raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') @@ -60,7 +63,7 @@ def test_create_encrypted_volume_dryrun(): @mock_ec2_deprecated def test_create_encrypted_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) with assert_raises(EC2ResponseError) as ex: @@ -76,7 +79,7 @@ def test_create_encrypted_volume(): @mock_ec2_deprecated def test_filter_volume_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(80, "us-east-1a") volume2 = conn.create_volume(36, "us-east-1b") volume3 = conn.create_volume(20, "us-east-1c") @@ -96,7 +99,7 @@ def test_filter_volume_by_id(): @mock_ec2_deprecated def test_volume_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] @@ -185,10 +188,15 @@ def test_volume_filters(): {volume1.id, volume3.id, volume4.id} ) + volumes_by_availability_zone = conn.get_all_volumes(filters={'availability-zone': 'us-east-1b'}) + set([vol.id for vol in volumes_by_availability_zone if vol.id in volume_ids]).should.equal( + {volume2.id} + ) + @mock_ec2_deprecated def test_volume_attach_and_detach(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] volume = conn.create_volume(80, "us-east-1a") @@ -244,7 +252,7 @@ def test_volume_attach_and_detach(): @mock_ec2_deprecated def test_create_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") with assert_raises(EC2ResponseError) as ex: @@ -283,7 +291,7 @@ def test_create_snapshot(): @mock_ec2_deprecated def test_create_encrypted_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) snapshot = volume.create_snapshot('a test snapshot') snapshot.update() @@ -298,7 +306,7 @@ def test_create_encrypted_snapshot(): @mock_ec2_deprecated def test_filter_snapshot_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(36, "us-east-1a") snap1 = volume1.create_snapshot('a test snapshot 1') volume2 = conn.create_volume(42, 'us-east-1a') @@ -325,7 +333,7 @@ def test_filter_snapshot_by_id(): @mock_ec2_deprecated def test_snapshot_filters(): - conn = 
boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume1 = conn.create_volume(20, "us-east-1a", encrypted=False) volume2 = conn.create_volume(25, "us-east-1a", encrypted=True) @@ -386,12 +394,17 @@ def test_snapshot_filters(): set([snap.id for snap in snapshots_by_encrypted] ).should.equal({snapshot3.id}) + snapshots_by_owner_id = conn.get_all_snapshots( + filters={'owner-id': '123456789012'}) + set([snap.id for snap in snapshots_by_owner_id] + ).should.equal({snapshot1.id, snapshot2.id, snapshot3.id}) + @mock_ec2_deprecated def test_snapshot_attribute(): import copy - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot() @@ -494,7 +507,7 @@ def test_snapshot_attribute(): @mock_ec2_deprecated def test_create_volume_from_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a") snapshot = volume.create_snapshot('a test snapshot') @@ -516,7 +529,7 @@ def test_create_volume_from_snapshot(): @mock_ec2_deprecated def test_create_volume_from_encrypted_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") volume = conn.create_volume(80, "us-east-1a", encrypted=True) snapshot = volume.create_snapshot('a test snapshot') @@ -561,7 +574,7 @@ def test_modify_attribute_blockDeviceMapping(): @mock_ec2_deprecated def test_volume_tag_escaping(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") vol = conn.create_volume(10, 'us-east-1a') snapshot = conn.create_snapshot(vol.id, 'Desc') @@ -579,3 +592,91 @@ def test_volume_tag_escaping(): snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] dict(snaps[0].tags).should.equal({'key': ''}) + + +@mock_ec2 +def test_volume_property_hidden_when_no_tags_exist(): + ec2_client = boto3.client('ec2', region_name='us-east-1') + + volume_response = ec2_client.create_volume( + Size=10, + AvailabilityZone='us-east-1a' + ) + + volume_response.get('Tags').should.equal(None) + + +@freeze_time +@mock_ec2 +def test_copy_snapshot(): + ec2_client = boto3.client('ec2', region_name='eu-west-1') + dest_ec2_client = boto3.client('ec2', region_name='eu-west-2') + + volume_response = ec2_client.create_volume( + AvailabilityZone='eu-west-1a', Size=10 + ) + + create_snapshot_response = ec2_client.create_snapshot( + VolumeId=volume_response['VolumeId'] + ) + + copy_snapshot_response = dest_ec2_client.copy_snapshot( + SourceSnapshotId=create_snapshot_response['SnapshotId'], + SourceRegion="eu-west-1" + ) + + ec2 = boto3.resource('ec2', region_name='eu-west-1') + dest_ec2 = boto3.resource('ec2', region_name='eu-west-2') + + source = ec2.Snapshot(create_snapshot_response['SnapshotId']) + dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId']) + + attribs = ['data_encryption_key_id', 'encrypted', + 'kms_key_id', 'owner_alias', 'owner_id', + 'progress', 'state', 'state_message', + 'tags', 'volume_id', 'volume_size'] + + for attrib in attribs: + getattr(source, attrib).should.equal(getattr(dest, attrib)) + + # Copy from non-existent source ID. 
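+    # Note: this first failure case exercises create_snapshot with an unknown volume id
+    # (expecting InvalidVolume.NotFound); the copy_snapshot failure case follows below.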
+ with assert_raises(ClientError) as cm: + create_snapshot_error = ec2_client.create_snapshot( + VolumeId='vol-abcd1234' + ) + cm.exception.response['Error']['Code'].should.equal('InvalidVolume.NotFound') + cm.exception.response['Error']['Message'].should.equal("The volume 'vol-abcd1234' does not exist.") + cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + # Copy from non-existent source region. + with assert_raises(ClientError) as cm: + copy_snapshot_response = dest_ec2_client.copy_snapshot( + SourceSnapshotId=create_snapshot_response['SnapshotId'], + SourceRegion="eu-west-2" + ) + cm.exception.response['Error']['Code'].should.equal('InvalidSnapshot.NotFound') + cm.exception.response['Error']['Message'].should.be.none + cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + +@mock_ec2 +def test_search_for_many_snapshots(): + ec2_client = boto3.client('ec2', region_name='eu-west-1') + + volume_response = ec2_client.create_volume( + AvailabilityZone='eu-west-1a', Size=10 + ) + + snapshot_ids = [] + for i in range(1, 20): + create_snapshot_response = ec2_client.create_snapshot( + VolumeId=volume_response['VolumeId'] + ) + snapshot_ids.append(create_snapshot_response['SnapshotId']) + + snapshots_response = ec2_client.describe_snapshots( + SnapshotIds=snapshot_ids + ) + + assert len(snapshots_response['Snapshots']) == len(snapshot_ids) diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index 709bdc33b..ca6637b18 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -62,6 +62,17 @@ def test_eip_allocate_vpc(): logging.debug("vpc alloc_id:".format(vpc.allocation_id)) vpc.release() +@mock_ec2 +def test_specific_eip_allocate_vpc(): + """Allocate VPC EIP with specific address""" + service = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + vpc = client.allocate_address(Domain="vpc", Address="127.38.43.222") + vpc['Domain'].should.be.equal("vpc") + vpc['PublicIp'].should.be.equal("127.38.43.222") + logging.debug("vpc alloc_id:".format(vpc['AllocationId'])) + @mock_ec2_deprecated def test_eip_allocate_invalid_domain(): diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index 828f9d917..05b45fda9 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -36,7 +36,8 @@ def test_elastic_network_interfaces(): all_enis.should.have.length_of(1) eni = all_enis[0] eni.groups.should.have.length_of(0) - eni.private_ip_addresses.should.have.length_of(0) + eni.private_ip_addresses.should.have.length_of(1) + eni.private_ip_addresses[0].private_ip_address.startswith('10.').should.be.true with assert_raises(EC2ResponseError) as ex: conn.delete_network_interface(eni.id, dry_run=True) @@ -160,7 +161,7 @@ def test_elastic_network_interfaces_filtering(): subnet.id, groups=[security_group1.id, security_group2.id]) eni2 = conn.create_network_interface( subnet.id, groups=[security_group1.id]) - eni3 = conn.create_network_interface(subnet.id) + eni3 = conn.create_network_interface(subnet.id, description='test description') all_enis = conn.get_all_network_interfaces() all_enis.should.have.length_of(3) @@ -188,6 +189,12 @@ def 
test_elastic_network_interfaces_filtering(): enis_by_group.should.have.length_of(1) set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id])) + # Filter by Description + enis_by_description = conn.get_all_network_interfaces( + filters={'description': eni3.description }) + enis_by_description.should.have.length_of(1) + enis_by_description[0].description.should.equal(eni3.description) + # Unsupported filter conn.get_all_network_interfaces.when.called_with( filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) @@ -342,6 +349,106 @@ def test_elastic_network_interfaces_get_by_subnet_id(): enis.should.have.length_of(0) +@mock_ec2 +def test_elastic_network_interfaces_get_by_description(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5', Description='test interface') + + # The status of the new interface should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id]) + + filters = [{'Name': 'description', 'Values': [eni1.description]}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'description', 'Values': ['bad description']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2 +def test_elastic_network_interfaces_describe_network_interfaces_with_filter(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5', Description='test interface') + + # The status of the new interface should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id]) + + # Filter by network-interface-id + response = ec2_client.describe_network_interfaces( + Filters=[{'Name': 'network-interface-id', 'Values': [eni1.id]}]) + response['NetworkInterfaces'].should.have.length_of(1) + response['NetworkInterfaces'][0]['NetworkInterfaceId'].should.equal(eni1.id) + response['NetworkInterfaces'][0]['PrivateIpAddress'].should.equal(eni1.private_ip_address) + response['NetworkInterfaces'][0]['Description'].should.equal(eni1.description) + + response = ec2_client.describe_network_interfaces( + Filters=[{'Name': 'network-interface-id', 'Values': ['bad-id']}]) + response['NetworkInterfaces'].should.have.length_of(0) + + # Filter by private-ip-address + response = ec2_client.describe_network_interfaces( + Filters=[{'Name': 'private-ip-address', 'Values': [eni1.private_ip_address]}]) + response['NetworkInterfaces'].should.have.length_of(1) + response['NetworkInterfaces'][0]['NetworkInterfaceId'].should.equal(eni1.id) + response['NetworkInterfaces'][0]['PrivateIpAddress'].should.equal(eni1.private_ip_address) + response['NetworkInterfaces'][0]['Description'].should.equal(eni1.description) + + response = ec2_client.describe_network_interfaces( + Filters=[{'Name': 'private-ip-address', 'Values': ['11.11.11.11']}]) + 
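+    # No interface was created with 11.11.11.11, so this lookup should come back empty.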
response['NetworkInterfaces'].should.have.length_of(0)
+
+    # Filter by subnet-id
+    response = ec2_client.describe_network_interfaces(
+        Filters=[{'Name': 'subnet-id', 'Values': [eni1.subnet.id]}])
+    response['NetworkInterfaces'].should.have.length_of(1)
+    response['NetworkInterfaces'][0]['NetworkInterfaceId'].should.equal(eni1.id)
+    response['NetworkInterfaces'][0]['PrivateIpAddress'].should.equal(eni1.private_ip_address)
+    response['NetworkInterfaces'][0]['Description'].should.equal(eni1.description)
+
+    response = ec2_client.describe_network_interfaces(
+        Filters=[{'Name': 'subnet-id', 'Values': ['sn-bad-id']}])
+    response['NetworkInterfaces'].should.have.length_of(0)
+
+    # Filter by description
+    response = ec2_client.describe_network_interfaces(
+        Filters=[{'Name': 'description', 'Values': [eni1.description]}])
+    response['NetworkInterfaces'].should.have.length_of(1)
+    response['NetworkInterfaces'][0]['NetworkInterfaceId'].should.equal(eni1.id)
+    response['NetworkInterfaces'][0]['PrivateIpAddress'].should.equal(eni1.private_ip_address)
+    response['NetworkInterfaces'][0]['Description'].should.equal(eni1.description)
+
+    response = ec2_client.describe_network_interfaces(
+        Filters=[{'Name': 'description', 'Values': ['bad description']}])
+    response['NetworkInterfaces'].should.have.length_of(0)
+
+    # Filter by multiple filters
+    response = ec2_client.describe_network_interfaces(
+        Filters=[{'Name': 'private-ip-address', 'Values': [eni1.private_ip_address]},
+                 {'Name': 'network-interface-id', 'Values': [eni1.id]},
+                 {'Name': 'subnet-id', 'Values': [eni1.subnet.id]}])
+    response['NetworkInterfaces'].should.have.length_of(1)
+    response['NetworkInterfaces'][0]['NetworkInterfaceId'].should.equal(eni1.id)
+    response['NetworkInterfaces'][0]['PrivateIpAddress'].should.equal(eni1.private_ip_address)
+    response['NetworkInterfaces'][0]['Description'].should.equal(eni1.description)
+
+
 @mock_ec2_deprecated
 @mock_cloudformation_deprecated
 def test_elastic_network_interfaces_cloudformation():
@@ -354,9 +461,13 @@ def test_elastic_network_interfaces_cloudformation():
     )
     ec2_conn = boto.ec2.connect_to_region("us-west-1")
     eni = ec2_conn.get_all_network_interfaces()[0]
+    eni.private_ip_addresses.should.have.length_of(1)
 
     stack = conn.describe_stacks()[0]
     resources = stack.describe_resources()
     cfn_eni = [resource for resource in resources
               if resource.resource_type ==
               'AWS::EC2::NetworkInterface'][0]
     cfn_eni.physical_resource_id.should.equal(eni.id)
+
+    outputs = {output.key: output.value for output in stack.outputs}
+    outputs['ENIIpAddress'].should.equal(eni.private_ip_addresses[0].private_ip_address)
diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py
index 5cfe01618..a83384709 100644
--- a/tests/test_ec2/test_instances.py
+++ b/tests/test_ec2/test_instances.py
@@ -1,5 +1,7 @@
 from __future__ import unicode_literals
 # Ensure 'assert_raises' context manager support for Python 2.6
+from botocore.exceptions import ClientError
+
 import tests.backport_assert_raises
 from nose.tools import assert_raises
 
@@ -42,7 +44,7 @@ def test_add_servers():
 @freeze_time("2014-01-01 05:00:00")
 @mock_ec2_deprecated
 def test_instance_launch_and_terminate():
-    conn = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.ec2.connect_to_region("us-east-1")
 
     with assert_raises(EC2ResponseError) as ex:
         reservation = conn.run_instances('ami-1234abcd', dry_run=True)
 
@@ -679,8 +681,8 @@ def test_modify_instance_attribute_security_groups():
     reservation = conn.run_instances('ami-1234abcd')
     instance =
reservation.instances[0] - sg_id = 'sg-1234abcd' - sg_id2 = 'sg-abcd4321' + sg_id = conn.create_security_group('test security group', 'this is a test security group').id + sg_id2 = conn.create_security_group('test security group 2', 'this is a test security group 2').id with assert_raises(EC2ResponseError) as ex: instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True) @@ -820,7 +822,7 @@ def test_run_instance_with_instance_type(): @mock_ec2_deprecated def test_run_instance_with_default_placement(): - conn = boto.connect_ec2('the_key', 'the_secret') + conn = boto.ec2.connect_to_region("us-east-1") reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] @@ -1233,3 +1235,74 @@ def test_modify_delete_on_termination(): ) instance.load() instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(True) + +@mock_ec2 +def test_create_instance_ebs_optimized(): + ec2_resource = boto3.resource('ec2', region_name='eu-west-1') + + instance = ec2_resource.create_instances( + ImageId = 'ami-12345678', + MaxCount = 1, + MinCount = 1, + EbsOptimized = True, + )[0] + instance.load() + instance.ebs_optimized.should.be(True) + + instance.modify_attribute( + EbsOptimized={ + 'Value': False + } + ) + instance.load() + instance.ebs_optimized.should.be(False) + + +@mock_ec2 +def test_run_multiple_instances_in_same_command(): + instance_count = 4 + client = boto3.client('ec2', region_name='us-east-1') + client.run_instances(ImageId='ami-1234abcd', + MinCount=instance_count, + MaxCount=instance_count) + reservations = client.describe_instances()['Reservations'] + + reservations[0]['Instances'].should.have.length_of(instance_count) + + instances = reservations[0]['Instances'] + for i in range(0, instance_count): + instances[i]['AmiLaunchIndex'].should.be(i) + + +@mock_ec2 +def test_describe_instance_attribute(): + client = boto3.client('ec2', region_name='us-east-1') + security_group_id = client.create_security_group( + GroupName='test security group', Description='this is a test security group')['GroupId'] + client.run_instances(ImageId='ami-1234abcd', + MinCount=1, + MaxCount=1, + SecurityGroupIds=[security_group_id]) + instance_id = client.describe_instances()['Reservations'][0]['Instances'][0]['InstanceId'] + + valid_instance_attributes = ['instanceType', 'kernel', 'ramdisk', 'userData', 'disableApiTermination', 'instanceInitiatedShutdownBehavior', 'rootDeviceName', 'blockDeviceMapping', 'productCodes', 'sourceDestCheck', 'groupSet', 'ebsOptimized', 'sriovNetSupport'] + + for valid_instance_attribute in valid_instance_attributes: + response = client.describe_instance_attribute(InstanceId=instance_id, Attribute=valid_instance_attribute) + if valid_instance_attribute == "groupSet": + response.should.have.key("Groups") + response["Groups"].should.have.length_of(1) + response["Groups"][0]["GroupId"].should.equal(security_group_id) + elif valid_instance_attribute == "userData": + response.should.have.key("UserData") + response["UserData"].should.be.empty + + invalid_instance_attributes = ['abc', 'Kernel', 'RamDisk', 'userdata', 'iNsTaNcEtYpE'] + + for invalid_instance_attribute in invalid_instance_attributes: + with assert_raises(ClientError) as ex: + client.describe_instance_attribute(InstanceId=instance_id, Attribute=invalid_instance_attribute) + ex.exception.response['Error']['Code'].should.equal('InvalidParameterValue') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + message = 'Value ({invalid_instance_attribute}) for 
parameter attribute is invalid. Unknown attribute.'.format(invalid_instance_attribute=invalid_instance_attribute) + ex.exception.response['Error']['Message'].should.equal(message) diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index 5842621cd..3a1d0fda9 100644 --- a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -199,7 +199,7 @@ def test_igw_desribe(): @mock_ec2_deprecated -def test_igw_desribe_bad_id(): +def test_igw_describe_bad_id(): """ internet gateway fail to fetch by bad id """ conn = boto.connect_vpc('the_key', 'the_secret') with assert_raises(EC2ResponseError) as cm: diff --git a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index 0a7fb9f76..dfe6eabdf 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -4,12 +4,46 @@ import tests.backport_assert_raises from nose.tools import assert_raises import boto -import six import sure # noqa from boto.exception import EC2ResponseError from moto import mock_ec2_deprecated +from .helpers import rsa_check_private_key + + +RSA_PUBLIC_KEY_OPENSSH = b"""\ +ssh-rsa \ +AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H\ +6cZANOQ+P1o/W4BdtcAL3sor4iGi7SOeJgo\8kweyMQrhrt6HaKGgromRiz37LQx\ +4YIAcBi4Zd023mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBp\ +JzbZlPN45ZCTk9ck0fSVHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6\ +A3t8mL7r91aM5q6QOQm219lctFM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2X\ +qusUO07jKuSxzPumXBeU+JEtx0J1tqZwJlpGt2R+0qN7nKnPl2+hx \ +moto@github.com""" + +RSA_PUBLIC_KEY_RFC4716 = b"""\ +---- BEGIN SSH2 PUBLIC KEY ---- +AAAAB3NzaC1yc2EAAAADAQABAAABAQDusXfgTE4eBP50NglSzCSEGnIL6+cr6m3H6cZANO +Q+P1o/W4BdtcAL3sor4iGi7SOeJgo8kweyMQrhrt6HaKGgromRiz37LQx4YIAcBi4Zd023 +mO/V7Rc2Chh18mWgLSmA6ng+j37ip6452zxtv0jHAz9pJolbKBpJzbZlPN45ZCTk9ck0fS +VHRl6VRSSPQcpqi65XpRf+35zNOCGCc1mAOOTmw59Q2a6A3t8mL7r91aM5q6QOQm219lct +FM8O7HRJnDgmhGpnjRwE1LyKktWTbgFZ4SNWU2XqusUO07jKuSxzPumXBeU+JEtx0J1tqZ +wJlpGt2R+0qN7nKnPl2+hx +---- END SSH2 PUBLIC KEY ---- +""" + +RSA_PUBLIC_KEY_FINGERPRINT = "6a:49:07:1c:7e:bd:d2:bd:96:25:fe:b5:74:83:ae:fd" + +DSA_PUBLIC_KEY_OPENSSH = b"""ssh-dss \ +AAAAB3NzaC1kc3MAAACBAJ0aXctVwbN6VB81gpo8R7DUk8zXRjZvrkg8Y8vEGt63gklpNJNsLXtEUXkl5D4c0nD2FZO1rJNqFoe\ +OQOCoGSfclHvt9w4yPl/lUEtb3Qtj1j80MInETHr19vaSunRk5R+M+8YH+LLcdYdz7MijuGey02mbi0H9K5nUIcuLMArVAAAAFQ\ +D0RDvsObRWBlnaW8645obZBM86jwAAAIBNZwf3B4krIzAwVfkMHLDSdAvs7lOWE7o8SJLzr9t4a9HhYp9SLbMzJ815KWfidEYV2\ ++s4ZaPCfcZ1GENFRbE8rixz5eMAjEUXEPMJkblDZTHzMsH96z2cOCQZ0vfOmgznsf18Uf725pqo9OqAioEsTJjX8jtI2qNPEBU0\ +uhMSZQAAAIBBMGhDu5CWPUlS2QG7vzmzw81XasmHE/s2YPDRbolkriwlunpgwZhCscoQP8HFHY+DLUVvUb+GZwBmFt4l1uHl03b\ +ffsm7UIHtCBYERr9Nx0u20ldfhkgB1lhaJb5o0ZJ3pmJ38KChfyHe5EUcqRdEFo89Mp72VI2Z6UHyL175RA== \ +moto@github.com""" + @mock_ec2_deprecated def test_key_pairs_empty(): @@ -33,14 +67,15 @@ def test_key_pairs_create(): conn = boto.connect_ec2('the_key', 'the_secret') with assert_raises(EC2ResponseError) as ex: - kp = conn.create_key_pair('foo', dry_run=True) + conn.create_key_pair('foo', dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal( 'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + rsa_check_private_key(kp.material) + kps = conn.get_all_key_pairs() assert len(kps) 
== 1 assert kps[0].name == 'foo' @@ -49,13 +84,19 @@ def test_key_pairs_create(): @mock_ec2_deprecated def test_key_pairs_create_two(): conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - kp = conn.create_key_pair('bar') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + + kp1 = conn.create_key_pair('foo') + rsa_check_private_key(kp1.material) + + kp2 = conn.create_key_pair('bar') + rsa_check_private_key(kp2.material) + + assert kp1.material != kp2.material + kps = conn.get_all_key_pairs() kps.should.have.length_of(2) - [i.name for i in kps].should.contain('foo') - [i.name for i in kps].should.contain('bar') + assert {i.name for i in kps} == {'foo', 'bar'} + kps = conn.get_all_key_pairs('foo') kps.should.have.length_of(1) kps[0].name.should.equal('foo') @@ -64,8 +105,7 @@ def test_key_pairs_create_two(): @mock_ec2_deprecated def test_key_pairs_create_exist(): conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + conn.create_key_pair('foo') assert len(conn.get_all_key_pairs()) == 1 with assert_raises(EC2ResponseError) as cm: @@ -105,23 +145,30 @@ def test_key_pairs_import(): conn = boto.connect_ec2('the_key', 'the_secret') with assert_raises(EC2ResponseError) as ex: - kp = conn.import_key_pair('foo', b'content', dry_run=True) + conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH, dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal( 'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') - kp = conn.import_key_pair('foo', b'content') - assert kp.name == 'foo' + kp1 = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH) + assert kp1.name == 'foo' + assert kp1.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT + + kp2 = conn.import_key_pair('foo2', RSA_PUBLIC_KEY_RFC4716) + assert kp2.name == 'foo2' + assert kp2.fingerprint == RSA_PUBLIC_KEY_FINGERPRINT + kps = conn.get_all_key_pairs() - assert len(kps) == 1 - assert kps[0].name == 'foo' + assert len(kps) == 2 + assert kps[0].name == kp1.name + assert kps[1].name == kp2.name @mock_ec2_deprecated def test_key_pairs_import_exist(): conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.import_key_pair('foo', b'content') + kp = conn.import_key_pair('foo', RSA_PUBLIC_KEY_OPENSSH) assert kp.name == 'foo' assert len(conn.get_all_key_pairs()) == 1 @@ -132,6 +179,32 @@ def test_key_pairs_import_exist(): cm.exception.request_id.should_not.be.none +@mock_ec2_deprecated +def test_key_pairs_invalid(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', b'') + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', b'garbage') + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is not in valid OpenSSH public key format') + + with assert_raises(EC2ResponseError) as ex: + conn.import_key_pair('foo', DSA_PUBLIC_KEY_OPENSSH) + ex.exception.error_code.should.equal('InvalidKeyPair.Format') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'Key is 
not in valid OpenSSH public key format') + + @mock_ec2_deprecated def test_key_pair_filters(): conn = boto.connect_ec2('the_key', 'the_secret') diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index fd2ec105e..1c69624bf 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -1,8 +1,11 @@ from __future__ import unicode_literals import boto +import boto3 import sure # noqa +from nose.tools import assert_raises +from botocore.exceptions import ClientError -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 @mock_ec2_deprecated @@ -27,12 +30,12 @@ def test_new_subnet_associates_with_default_network_acl(): conn = boto.connect_vpc('the_key', 'the secret') vpc = conn.get_all_vpcs()[0] - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + subnet = conn.create_subnet(vpc.id, "172.31.112.0/20") all_network_acls = conn.get_all_network_acls() all_network_acls.should.have.length_of(1) acl = all_network_acls[0] - acl.associations.should.have.length_of(4) + acl.associations.should.have.length_of(7) [a.subnet_id for a in acl.associations].should.contain(subnet.id) @@ -173,3 +176,77 @@ def test_network_acl_tagging(): if na.id == network_acl.id) test_network_acl.tags.should.have.length_of(1) test_network_acl.tags["a key"].should.equal("some value") + + +@mock_ec2 +def test_new_subnet_in_new_vpc_associates_with_default_network_acl(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + new_vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + new_vpc.reload() + + subnet = ec2.create_subnet(VpcId=new_vpc.id, CidrBlock='10.0.0.0/24') + subnet.reload() + + new_vpcs_default_network_acl = next(iter(new_vpc.network_acls.all()), None) + new_vpcs_default_network_acl.reload() + new_vpcs_default_network_acl.vpc_id.should.equal(new_vpc.id) + new_vpcs_default_network_acl.associations.should.have.length_of(1) + new_vpcs_default_network_acl.associations[0]['SubnetId'].should.equal(subnet.id) + + +@mock_ec2 +def test_default_network_acl_default_entries(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + default_network_acl = next(iter(ec2.network_acls.all()), None) + default_network_acl.is_default.should.be.ok + + default_network_acl.entries.should.have.length_of(4) + unique_entries = [] + for entry in default_network_acl.entries: + entry['CidrBlock'].should.equal('0.0.0.0/0') + entry['Protocol'].should.equal('-1') + entry['RuleNumber'].should.be.within([100, 32767]) + entry['RuleAction'].should.be.within(['allow', 'deny']) + assert type(entry['Egress']) is bool + if entry['RuleAction'] == 'allow': + entry['RuleNumber'].should.be.equal(100) + else: + entry['RuleNumber'].should.be.equal(32767) + if entry not in unique_entries: + unique_entries.append(entry) + + unique_entries.should.have.length_of(4) + + +@mock_ec2 +def test_delete_default_network_acl_default_entry(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + default_network_acl = next(iter(ec2.network_acls.all()), None) + default_network_acl.is_default.should.be.ok + + default_network_acl.entries.should.have.length_of(4) + first_default_network_acl_entry = default_network_acl.entries[0] + + default_network_acl.delete_entry(Egress=first_default_network_acl_entry['Egress'], + RuleNumber=first_default_network_acl_entry['RuleNumber']) + + default_network_acl.entries.should.have.length_of(3) + + +@mock_ec2 +def test_duplicate_network_acl_entry(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + default_network_acl = 
next(iter(ec2.network_acls.all()), None) + default_network_acl.is_default.should.be.ok + + rule_number = 200 + egress = True + default_network_acl.create_entry(CidrBlock="0.0.0.0/0", Egress=egress, Protocol="-1", RuleAction="allow", RuleNumber=rule_number) + + with assert_raises(ClientError) as ex: + default_network_acl.create_entry(CidrBlock="10.0.0.0/0", Egress=egress, Protocol="-1", RuleAction="deny", RuleNumber=rule_number) + str(ex.exception).should.equal( + "An error occurred (NetworkAclEntryAlreadyExists) when calling the CreateNetworkAclEntry " + "operation: The network acl entry identified by {} already exists.".format(rule_number)) + + diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py index 1e87b253c..f94c78eaf 100644 --- a/tests/test_ec2/test_regions.py +++ b/tests/test_ec2/test_regions.py @@ -68,8 +68,10 @@ def test_create_autoscaling_group(): image_id='ami-abcd1234', instance_type='m1.small', ) - us_conn.create_launch_configuration(config) + x = us_conn.create_launch_configuration(config) + us_subnet_id = list(ec2_backends['us-east-1'].subnets['us-east-1c'].keys())[0] + ap_subnet_id = list(ec2_backends['ap-northeast-1'].subnets['ap-northeast-1a'].keys())[0] group = boto.ec2.autoscale.AutoScalingGroup( name='us_tester_group', availability_zones=['us-east-1c'], @@ -82,7 +84,7 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["us_test_lb"], placement_group="us_test_placement", - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=us_subnet_id, termination_policies=["OldestInstance", "NewestInstance"], ) us_conn.create_auto_scaling_group(group) @@ -107,7 +109,7 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["ap_test_lb"], placement_group="ap_test_placement", - vpc_zone_identifier='subnet-5678efgh', + vpc_zone_identifier=ap_subnet_id, termination_policies=["OldestInstance", "NewestInstance"], ) ap_conn.create_auto_scaling_group(group) @@ -121,7 +123,7 @@ def test_create_autoscaling_group(): us_group.desired_capacity.should.equal(2) us_group.max_size.should.equal(2) us_group.min_size.should.equal(2) - us_group.vpc_zone_identifier.should.equal('subnet-1234abcd') + us_group.vpc_zone_identifier.should.equal(us_subnet_id) us_group.launch_config_name.should.equal('us_tester') us_group.default_cooldown.should.equal(60) us_group.health_check_period.should.equal(100) @@ -137,7 +139,7 @@ def test_create_autoscaling_group(): ap_group.desired_capacity.should.equal(2) ap_group.max_size.should.equal(2) ap_group.min_size.should.equal(2) - ap_group.vpc_zone_identifier.should.equal('subnet-5678efgh') + ap_group.vpc_zone_identifier.should.equal(ap_subnet_id) ap_group.launch_config_name.should.equal('ap_tester') ap_group.default_cooldown.should.equal(60) ap_group.health_check_period.should.equal(100) diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index b27484468..de33b3f7a 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -6,6 +6,7 @@ from nose.tools import assert_raises import boto import boto3 from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError import sure # noqa from moto import mock_ec2, mock_ec2_deprecated @@ -528,3 +529,26 @@ def test_network_acl_tagging(): if na.id == route_table.id) test_route_table.tags.should.have.length_of(1) test_route_table.tags["a key"].should.equal("some value") + + +@mock_ec2 +def test_create_route_with_invalid_destination_cidr_block_parameter(): + ec2 = 
boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + route_table = ec2.create_route_table(VpcId=vpc.id) + route_table.reload() + + internet_gateway = ec2.create_internet_gateway() + vpc.attach_internet_gateway(InternetGatewayId=internet_gateway.id) + internet_gateway.reload() + + destination_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + route = route_table.create_route(DestinationCidrBlock=destination_cidr_block, GatewayId=internet_gateway.id) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateRoute " + "operation: Value ({}) for parameter destinationCidrBlock is invalid. This is not a valid CIDR block.".format(destination_cidr_block)) \ No newline at end of file diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py index 0d7565a31..c09b1e8f4 100644 --- a/tests/test_ec2/test_security_groups.py +++ b/tests/test_ec2/test_security_groups.py @@ -501,7 +501,7 @@ def test_sec_group_rule_limit_vpc(): ec2_conn = boto.connect_ec2() vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc('10.0.0.0/8') + vpc = vpc_conn.create_vpc('10.0.0.0/16') sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id) other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id) @@ -689,6 +689,31 @@ def test_authorize_and_revoke_in_bulk(): sg01.ip_permissions_egress.shouldnt.contain(ip_permission) +@mock_ec2 +def test_security_group_ingress_without_multirule(): + ec2 = boto3.resource('ec2', 'ca-central-1') + sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') + + assert len(sg.ip_permissions) == 0 + sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') + + # Fails + assert len(sg.ip_permissions) == 1 + + +@mock_ec2 +def test_security_group_ingress_without_multirule_after_reload(): + ec2 = boto3.resource('ec2', 'ca-central-1') + sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') + + assert len(sg.ip_permissions) == 0 + sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') + + # Also Fails + sg_after = ec2.SecurityGroup(sg.id) + assert len(sg_after.ip_permissions) == 1 + + @mock_ec2_deprecated def test_get_all_security_groups_filter_with_same_vpc_id(): conn = boto.connect_ec2('the_key', 'the_secret') diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index a8d33c299..6221d633f 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -7,7 +7,7 @@ from moto import mock_ec2 def get_subnet_id(conn): - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] @@ -54,7 +54,7 @@ def spot_config(subnet_id, allocation_strategy="lowestPrice"): }, 'EbsOptimized': False, 'WeightedCapacity': 2.0, - 'SpotPrice': '0.13' + 'SpotPrice': '0.13', }, { 'ImageId': 'ami-123', 'KeyName': 'my-key', @@ -148,6 +148,48 @@ def test_create_diversified_spot_fleet(): instances[0]['InstanceId'].should.contain("i-") +@mock_ec2 +def test_create_spot_fleet_request_with_tag_spec(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + tag_spec = [ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'tag-1', + 
'Value': 'foo', + }, + { + 'Key': 'tag-2', + 'Value': 'bar', + }, + ] + }, + ] + config = spot_config(subnet_id) + config['LaunchSpecifications'][0]['TagSpecifications'] = tag_spec + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=config + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + spot_fleet_config = spot_fleet_requests[0]['SpotFleetRequestConfig'] + spot_fleet_config['LaunchSpecifications'][0]['TagSpecifications'][0][ + 'ResourceType'].should.equal('instance') + for tag in tag_spec[0]['Tags']: + spot_fleet_config['LaunchSpecifications'][0]['TagSpecifications'][0]['Tags'].should.contain(tag) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = conn.describe_instances(InstanceIds=[i['InstanceId'] for i in instance_res['ActiveInstances']]) + for instance in instances['Reservations'][0]['Instances']: + for tag in tag_spec[0]['Tags']: + instance['Tags'].should.contain(tag) + + @mock_ec2 def test_cancel_spot_fleet_request(): conn = boto3.client("ec2", region_name='us-west-2') @@ -316,3 +358,30 @@ def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate(): SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] spot_fleet_config['TargetCapacity'].should.equal(1) spot_fleet_config['FulfilledCapacity'].should.equal(2.0) + + +@mock_ec2 +def test_create_spot_fleet_without_spot_price(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + # remove prices to force a fallback to ondemand price + spot_config_without_price = spot_config(subnet_id) + del spot_config_without_price['SpotPrice'] + for spec in spot_config_without_price['LaunchSpecifications']: + del spec['SpotPrice'] + + spot_fleet_id = conn.request_spot_fleet(SpotFleetRequestConfig=spot_config_without_price)['SpotFleetRequestId'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(1) + spot_fleet_request = spot_fleet_requests[0] + spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] + + len(spot_fleet_config['LaunchSpecifications']).should.equal(2) + launch_spec1 = spot_fleet_config['LaunchSpecifications'][0] + launch_spec2 = spot_fleet_config['LaunchSpecifications'][1] + + # AWS will figure out the price + assert 'SpotPrice' not in launch_spec1 + assert 'SpotPrice' not in launch_spec2 diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 05f8ee88f..ab08d392c 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -17,7 +17,7 @@ from moto.core.utils import iso_8601_datetime_with_milliseconds @mock_ec2 def test_request_spot_instances(): conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + vpc = conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc'] subnet = conn.create_subnet( VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] subnet_id = subnet['SubnetId'] diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 99e6d45d8..38c36f682 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -7,7 +7,7 @@ import boto3 import boto import boto.vpc from boto.exception import EC2ResponseError -from botocore.exceptions 
import ParamValidationError +from botocore.exceptions import ParamValidationError, ClientError import json import sure # noqa @@ -84,7 +84,7 @@ def test_default_subnet(): default_vpc.is_default.should.be.ok subnet = ec2.create_subnet( - VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') + VpcId=default_vpc.id, CidrBlock='172.31.48.0/20', AvailabilityZone='us-west-1a') subnet.reload() subnet.map_public_ip_on_launch.shouldnt.be.ok @@ -118,7 +118,7 @@ def test_boto3_non_default_subnet(): @mock_ec2 -def test_modify_subnet_attribute(): +def test_modify_subnet_attribute_public_ip_on_launch(): ec2 = boto3.resource('ec2', region_name='us-west-1') client = boto3.client('ec2', region_name='us-west-1') @@ -126,7 +126,7 @@ def test_modify_subnet_attribute(): vpc = list(ec2.vpcs.all())[0] subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + VpcId=vpc.id, CidrBlock="172.31.48.0/20", AvailabilityZone='us-west-1a') # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action subnet.reload() @@ -145,6 +145,34 @@ def test_modify_subnet_attribute(): subnet.map_public_ip_on_launch.should.be.ok +@mock_ec2 +def test_modify_subnet_attribute_assign_ipv6_address_on_creation(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + # Get the default VPC + vpc = list(ec2.vpcs.all())[0] + + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='172.31.112.0/20', AvailabilityZone='us-west-1a') + + # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action + subnet.reload() + + # For non default subnet, attribute value should be 'False' + subnet.assign_ipv6_address_on_creation.shouldnt.be.ok + + client.modify_subnet_attribute( + SubnetId=subnet.id, AssignIpv6AddressOnCreation={'Value': False}) + subnet.reload() + subnet.assign_ipv6_address_on_creation.shouldnt.be.ok + + client.modify_subnet_attribute( + SubnetId=subnet.id, AssignIpv6AddressOnCreation={'Value': True}) + subnet.reload() + subnet.assign_ipv6_address_on_creation.should.be.ok + + @mock_ec2 def test_modify_subnet_attribute_validation(): ec2 = boto3.resource('ec2', region_name='us-west-1') @@ -289,3 +317,130 @@ def test_subnet_tags_through_cloudformation(): subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] subnet.tags["foo"].should.equal("bar") subnet.tags["blah"].should.equal("baz") + + +@mock_ec2 +def test_create_subnet_response_fields(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = client.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a')['Subnet'] + + subnet.should.have.key('AvailabilityZone') + subnet.should.have.key('AvailabilityZoneId') + subnet.should.have.key('AvailableIpAddressCount') + subnet.should.have.key('CidrBlock') + subnet.should.have.key('State') + subnet.should.have.key('SubnetId') + subnet.should.have.key('VpcId') + subnet.shouldnt.have.key('Tags') + subnet.should.have.key('DefaultForAz').which.should.equal(False) + subnet.should.have.key('MapPublicIpOnLaunch').which.should.equal(False) + subnet.should.have.key('OwnerId') + subnet.should.have.key('AssignIpv6AddressOnCreation').which.should.equal(False) + + subnet_arn = "arn:aws:ec2:{region}:{owner_id}:subnet/{subnet_id}".format(region=subnet['AvailabilityZone'][0:-1], + owner_id=subnet['OwnerId'], + subnet_id=subnet['SubnetId']) + 
subnet.should.have.key('SubnetArn').which.should.equal(subnet_arn) + subnet.should.have.key('Ipv6CidrBlockAssociationSet').which.should.equal([]) + + +@mock_ec2 +def test_describe_subnet_response_fields(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet_object = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') + + subnets = client.describe_subnets(SubnetIds=[subnet_object.id])['Subnets'] + subnets.should.have.length_of(1) + subnet = subnets[0] + + subnet.should.have.key('AvailabilityZone') + subnet.should.have.key('AvailabilityZoneId') + subnet.should.have.key('AvailableIpAddressCount') + subnet.should.have.key('CidrBlock') + subnet.should.have.key('State') + subnet.should.have.key('SubnetId') + subnet.should.have.key('VpcId') + subnet.shouldnt.have.key('Tags') + subnet.should.have.key('DefaultForAz').which.should.equal(False) + subnet.should.have.key('MapPublicIpOnLaunch').which.should.equal(False) + subnet.should.have.key('OwnerId') + subnet.should.have.key('AssignIpv6AddressOnCreation').which.should.equal(False) + + subnet_arn = "arn:aws:ec2:{region}:{owner_id}:subnet/{subnet_id}".format(region=subnet['AvailabilityZone'][0:-1], + owner_id=subnet['OwnerId'], + subnet_id=subnet['SubnetId']) + subnet.should.have.key('SubnetArn').which.should.equal(subnet_arn) + subnet.should.have.key('Ipv6CidrBlockAssociationSet').which.should.equal([]) + + +@mock_ec2 +def test_create_subnet_with_invalid_availability_zone(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + subnet_availability_zone = 'asfasfas' + with assert_raises(ClientError) as ex: + subnet = client.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone=subnet_availability_zone) + assert str(ex.exception).startswith( + "An error occurred (InvalidParameterValue) when calling the CreateSubnet " + "operation: Value ({}) for parameter availabilityZone is invalid. Subnets can currently only be created in the following availability zones: ".format(subnet_availability_zone)) + + +@mock_ec2 +def test_create_subnet_with_invalid_cidr_range(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '10.1.0.0/20' + with assert_raises(ClientError) as ex: + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidSubnet.Range) when calling the CreateSubnet " + "operation: The CIDR '{}' is invalid.".format(subnet_cidr_block)) + + +@mock_ec2 +def test_create_subnet_with_invalid_cidr_block_parameter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateSubnet " + "operation: Value ({}) for parameter cidrBlock is invalid. 
This is not a valid CIDR block.".format(subnet_cidr_block)) + + +@mock_ec2 +def test_create_subnets_with_overlapping_cidr_blocks(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + subnet_cidr_block = '10.0.0.0/24' + with assert_raises(ClientError) as ex: + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock=subnet_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidSubnet.Conflict) when calling the CreateSubnet " + "operation: The CIDR '{}' conflicts with another subnet".format(subnet_cidr_block)) diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index ccef5a288..2294979ba 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -4,11 +4,13 @@ from nose.tools import assert_raises import itertools import boto +import boto3 +from botocore.exceptions import ClientError from boto.exception import EC2ResponseError from boto.ec2.instance import Reservation import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 from nose.tools import assert_raises @@ -385,3 +387,96 @@ def test_filter_instances_by_wildcard_tags(): reservations = conn.get_all_instances(filters={'tag-value': 'Value*'}) reservations.should.have.length_of(2) + + +@mock_ec2 +def test_create_volume_with_tags(): + client = boto3.client('ec2', 'us-west-2') + response = client.create_volume( + AvailabilityZone='us-west-2', + Encrypted=False, + Size=40, + TagSpecifications=[ + { + 'ResourceType': 'volume', + 'Tags': [ + { + 'Key': 'TEST_TAG', + 'Value': 'TEST_VALUE' + } + ], + } + ] + ) + + assert response['Tags'][0]['Key'] == 'TEST_TAG' + + +@mock_ec2 +def test_create_snapshot_with_tags(): + client = boto3.client('ec2', 'us-west-2') + volume_id = client.create_volume( + AvailabilityZone='us-west-2', + Encrypted=False, + Size=40, + TagSpecifications=[ + { + 'ResourceType': 'volume', + 'Tags': [ + { + 'Key': 'TEST_TAG', + 'Value': 'TEST_VALUE' + } + ], + } + ] + )['VolumeId'] + snapshot = client.create_snapshot( + VolumeId=volume_id, + TagSpecifications=[ + { + 'ResourceType': 'snapshot', + 'Tags': [ + { + 'Key': 'TEST_SNAPSHOT_TAG', + 'Value': 'TEST_SNAPSHOT_VALUE' + } + ], + } + ] + ) + + expected_tags = [{ + 'Key': 'TEST_SNAPSHOT_TAG', + 'Value': 'TEST_SNAPSHOT_VALUE' + }] + + assert snapshot['Tags'] == expected_tags + + +@mock_ec2 +def test_create_tag_empty_resource(): + # create ec2 client in us-west-1 + client = boto3.client('ec2', region_name='us-west-1') + # create tag with empty resource + with assert_raises(ClientError) as ex: + client.create_tags( + Resources=[], + Tags=[{'Key': 'Value'}] + ) + ex.exception.response['Error']['Code'].should.equal('MissingParameter') + ex.exception.response['Error']['Message'].should.equal('The request must contain the parameter resourceIdSet') + + +@mock_ec2 +def test_delete_tag_empty_resource(): + # create ec2 client in us-west-1 + client = boto3.client('ec2', region_name='us-west-1') + # delete tag with empty resource + with assert_raises(ClientError) as ex: + client.delete_tags( + Resources=[], + Tags=[{'Key': 'Value'}] + ) + ex.exception.response['Error']['Code'].should.equal('MissingParameter') + ex.exception.response['Error']['Message'].should.equal('The request must contain the parameter resourceIdSet') diff --git a/tests/test_ec2/test_utils.py b/tests/test_ec2/test_utils.py index ef540e193..49192dc79 
100644 --- a/tests/test_ec2/test_utils.py +++ b/tests/test_ec2/test_utils.py @@ -1,8 +1,12 @@ from moto.ec2 import utils +from .helpers import rsa_check_private_key + def test_random_key_pair(): key_pair = utils.random_key_pair() - assert len(key_pair['fingerprint']) == 59 - assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----') - assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----') + rsa_check_private_key(key_pair['material']) + + # AWS uses MD5 fingerprints, which are 47 characters long, *not* SHA1 + # fingerprints with 59 characters. + assert len(key_pair['fingerprint']) == 47 diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index 6722eed60..edfbfb3c2 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -2,12 +2,15 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 import tests.backport_assert_raises from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError +import boto3 import boto from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2, mock_ec2_deprecated from tests.helpers import requires_boto_gte @@ -86,10 +89,195 @@ def test_vpc_peering_connections_delete(): verdict.should.equal(True) all_vpc_pcxs = conn.get_all_vpc_peering_connections() - all_vpc_pcxs.should.have.length_of(0) + all_vpc_pcxs.should.have.length_of(1) + all_vpc_pcxs[0]._status.code.should.equal('deleted') with assert_raises(EC2ResponseError) as cm: conn.delete_vpc_peering_connection("pcx-1234abcd") cm.exception.code.should.equal('InvalidVpcPeeringConnectionId.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_vpc_peering_connections_cross_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + vpc_pcx_usw1.status['Code'].should.equal('initiating-request') + vpc_pcx_usw1.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx_usw1.accepter_vpc.id.should.equal(vpc_apn1.id) + # test cross region vpc peering connection exist + vpc_pcx_apn1 = ec2_apn1.VpcPeeringConnection(vpc_pcx_usw1.id) + vpc_pcx_apn1.id.should.equal(vpc_pcx_usw1.id) + vpc_pcx_apn1.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx_apn1.accepter_vpc.id.should.equal(vpc_apn1.id) + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_fail(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering wrong region with no vpc + with assert_raises(ClientError) as cm: + ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-2') + cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound') + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_accept(): + # 
create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # accept peering from ap-northeast-1 + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + acp_pcx_apn1 = ec2_apn1.accept_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + des_pcx_apn1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + acp_pcx_apn1['VpcPeeringConnection']['Status']['Code'].should.equal('active') + des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active') + des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('active') + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_reject(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # reject peering from ap-northeast-1 + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + rej_pcx_apn1 = ec2_apn1.reject_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + des_pcx_apn1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + rej_pcx_apn1['Return'].should.equal(True) + des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected') + des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('rejected') + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_delete(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # reject peering from ap-northeast-1 + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + del_pcx_apn1 = ec2_apn1.delete_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + des_pcx_apn1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + des_pcx_usw1 = ec2_usw1.describe_vpc_peering_connections( + VpcPeeringConnectionIds=[vpc_pcx_usw1.id] + ) + del_pcx_apn1['Return'].should.equal(True) + des_pcx_apn1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted') + 
des_pcx_usw1['VpcPeeringConnections'][0]['Status']['Code'].should.equal('deleted') + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_accept_wrong_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + + # accept wrong peering from us-west-1 which will raise error + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + with assert_raises(ClientError) as cm: + ec2_usw1.accept_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted') + exp_msg = 'Incorrect region ({0}) specified for this request.VPC ' \ + 'peering connection {1} must be ' \ + 'accepted in region {2}'.format('us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1') + cm.exception.response['Error']['Message'].should.equal(exp_msg) + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_reject_wrong_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx_usw1 = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + # reject wrong peering from us-west-1 which will raise error + ec2_apn1 = boto3.client('ec2', region_name='ap-northeast-1') + ec2_usw1 = boto3.client('ec2', region_name='us-west-1') + with assert_raises(ClientError) as cm: + ec2_usw1.reject_vpc_peering_connection( + VpcPeeringConnectionId=vpc_pcx_usw1.id + ) + cm.exception.response['Error']['Code'].should.equal('OperationNotPermitted') + exp_msg = 'Incorrect region ({0}) specified for this request.VPC ' \ + 'peering connection {1} must be accepted or ' \ + 'rejected in region {2}'.format('us-west-1', vpc_pcx_usw1.id, 'ap-northeast-1') + cm.exception.response['Error']['Message'].should.equal(exp_msg) diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index fc0a93cbb..ad17deb3c 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 import tests.backport_assert_raises # flake8: noqa from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError import boto3 import boto @@ -275,8 +277,8 @@ def test_default_vpc(): def test_non_default_vpc(): ec2 = boto3.resource('ec2', region_name='us-west-1') - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') + # Create the default VPC - this already exists when backend instantiated! 
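# Editorial aside, not part of this patch: moto seeds the default VPC with
# 172.31.0.0/16 (the default-subnet tests elsewhere in this diff rely on that
# block), so a hedged sketch of verifying it, reusing the `ec2` resource
# created above:
default_vpc = list(ec2.vpcs.all())[0]
default_vpc.is_default.should.be.ok            # the pre-seeded VPC is the default
default_vpc.cidr_block.should.equal('172.31.0.0/16')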
+    #ec2.create_vpc(CidrBlock='172.31.0.0/16')
 
     # Create the non default VPC
     vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
@@ -295,6 +297,12 @@ def test_non_default_vpc():
     attr = response.get('EnableDnsHostnames')
     attr.get('Value').shouldnt.be.ok
 
+    # Check Primary CIDR Block Associations
+    cidr_block_association_set = next(iter(vpc.cidr_block_association_set), None)
+    cidr_block_association_set['CidrBlockState']['State'].should.equal('associated')
+    cidr_block_association_set['CidrBlock'].should.equal(vpc.cidr_block)
+    cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc')
+
 
 @mock_ec2
 def test_vpc_dedicated_tenancy():
@@ -340,7 +348,6 @@ def test_vpc_modify_enable_dns_hostnames():
     ec2.create_vpc(CidrBlock='172.31.0.0/16')
     vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
 
-    # Test default values for VPC attributes
     response = vpc.describe_attribute(Attribute='enableDnsHostnames')
     attr = response.get('EnableDnsHostnames')
@@ -364,3 +371,195 @@ def test_vpc_associate_dhcp_options():
 
     vpc.update()
     dhcp_options.id.should.equal(vpc.dhcp_options_id)
+
+
+@mock_ec2
+def test_associate_vpc_ipv4_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24')
+
+    # Associate/extend the VPC CIDR range up to 5 CIDRs
+    for i in range(43, 47):
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.{}.0/24'.format(i))
+        response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('associating')
+        response['CidrBlockAssociation']['CidrBlock'].should.equal('10.10.{}.0/24'.format(i))
+        response['CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc')
+
+    # Check all associations exist
+    vpc = ec2.Vpc(vpc.id)
+    vpc.cidr_block_association_set.should.have.length_of(5)
+    vpc.cidr_block_association_set[2]['CidrBlockState']['State'].should.equal('associated')
+    vpc.cidr_block_association_set[4]['CidrBlockState']['State'].should.equal('associated')
+
+    # Check error on adding 6th association.
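# Editorial aside, not part of this patch: range(43, 47) above yields four
# associations, so together with the primary block the VPC now holds five
# CIDRs, the limit that the error message below names. A self-contained check:
associated_cidrs = ['10.10.42.0/24'] + ['10.10.{}.0/24'.format(i) for i in range(43, 47)]
assert len(associated_cidrs) == 5  # a sixth association must raise CidrLimitExceeded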
+ with assert_raises(ClientError) as ex: + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.50.0/22') + str(ex.exception).should.equal( + "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " + "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format(vpc.id)) + +@mock_ec2 +def test_disassociate_vpc_ipv4_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.43.0/24') + + # Remove an extended cidr block + vpc = ec2.Vpc(vpc.id) + non_default_assoc_cidr_block = next(iter([x for x in vpc.cidr_block_association_set if vpc.cidr_block != x['CidrBlock']]), None) + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=non_default_assoc_cidr_block['AssociationId']) + response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('disassociating') + response['CidrBlockAssociation']['CidrBlock'].should.equal(non_default_assoc_cidr_block['CidrBlock']) + response['CidrBlockAssociation']['AssociationId'].should.equal(non_default_assoc_cidr_block['AssociationId']) + + # Error attempting to delete a non-existent CIDR_BLOCK association + with assert_raises(ClientError) as ex: + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId='vpc-cidr-assoc-BORING123') + str(ex.exception).should.equal( + "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the " + "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID " + "'vpc-cidr-assoc-BORING123' does not exist") + + # Error attempting to delete Primary CIDR BLOCK association + vpc_base_cidr_assoc_id = next(iter([x for x in vpc.cidr_block_association_set + if vpc.cidr_block == x['CidrBlock']]), {})['AssociationId'] + + with assert_raises(ClientError) as ex: + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=vpc_base_cidr_assoc_id) + str(ex.exception).should.equal( + "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: " + "The vpc CIDR block with association ID {} may not be disassociated. 
It is the primary " + "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id)) + +@mock_ec2 +def test_cidr_block_association_filters(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') + vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') + vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') + vpc3_assoc_response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.3.0/24') + + # Test filters for a cidr-block in all VPCs cidr-block-associations + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.cidr-block', + 'Values': ['10.10.0.0/19']}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc2.id) + + # Test filter for association id in VPCs + association_id = vpc3_assoc_response['CidrBlockAssociation']['AssociationId'] + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', + 'Values': [association_id]}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc3.id) + + # Test filter for association state in VPC - this will never show anything in this test + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', + 'Values': ['failing']}])) + filtered_vpcs.should.be.length_of(0) + +@mock_ec2 +def test_vpc_associate_ipv6_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Test create VPC with IPV6 cidr range + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) + ipv6_cidr_block_association_set = next(iter(vpc.ipv6_cidr_block_association_set), None) + ipv6_cidr_block_association_set['Ipv6CidrBlockState']['State'].should.equal('associated') + ipv6_cidr_block_association_set['Ipv6CidrBlock'].should.contain('::/56') + ipv6_cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') + + # Test Fail on adding 2nd IPV6 association - AWS only allows 1 at this time! 
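# Editorial aside, not part of this patch: the '::/56' assertions above can be
# sanity-checked with the ipaddress module (stdlib on Python 3, a backport on
# Python 2); the network below is a made-up example of an Amazon-provided block:
import ipaddress
example_block = ipaddress.ip_network(u'2600:1f14:abc:5600::/56')
assert example_block.prefixlen == 56  # Amazon-provided VPC IPv6 blocks are /56s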
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True)
+    str(ex.exception).should.equal(
+        "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock "
+        "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format(vpc.id))
+
+    # Test associate ipv6 cidr block after vpc created
+    vpc = ec2.create_vpc(CidrBlock='10.10.50.0/24')
+    response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True)
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('associating')
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56')
+    response['Ipv6CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc-')
+
+    # Check on describe vpc that has ipv6 cidr block association
+    vpc = ec2.Vpc(vpc.id)
+    vpc.ipv6_cidr_block_association_set.should.be.length_of(1)
+
+
+@mock_ec2
+def test_vpc_disassociate_ipv6_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    # Test create VPC with IPV6 cidr range
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True)
+    # Test disassociating the only IPV6
+    assoc_id = vpc.ipv6_cidr_block_association_set[0]['AssociationId']
+    response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=assoc_id)
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('disassociating')
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56')
+    response['Ipv6CidrBlockAssociation']['AssociationId'].should.equal(assoc_id)
+
+
+@mock_ec2
+def test_ipv6_cidr_block_association_filters():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+    vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16')
+
+    vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16', AmazonProvidedIpv6CidrBlock=True)
+    vpc2_assoc_ipv6_assoc_id = vpc2.ipv6_cidr_block_association_set[0]['AssociationId']
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19')
+
+    vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24')
+    response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, AmazonProvidedIpv6CidrBlock=True)
+    vpc3_ipv6_cidr_block = response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
+
+    vpc4 = ec2.create_vpc(CidrBlock='10.95.0.0/16')  # Here for its looks
+
+    # Test filters for an ipv6 cidr-block in all VPCs cidr-block-associations
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.ipv6-cidr-block',
+                                                   'Values': [vpc3_ipv6_cidr_block]}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc3.id)
+
+    # Test filter for association id in VPCs
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.association-id',
+                                                   'Values': [vpc2_assoc_ipv6_assoc_id]}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc2.id)
+
+    # Test filter for association state - only the two VPCs that hold an
+    # Amazon-provided IPv6 block (vpc2 and vpc3) are in the 'associated' state
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state',
+                                                   'Values': ['associated']}]))
+    filtered_vpcs.should.be.length_of(2)  # 2 of 4 VPCs
+
+
+@mock_ec2
+def test_create_vpc_with_invalid_cidr_block_parameter():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    
vpc_cidr_block = '1000.1.0.0/20' + with assert_raises(ClientError) as ex: + vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidParameterValue) when calling the CreateVpc " + "operation: Value ({}) for parameter cidrBlock is invalid. This is not a valid CIDR block.".format(vpc_cidr_block)) + + +@mock_ec2 +def test_create_vpc_with_invalid_cidr_range(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc_cidr_block = '10.1.0.0/29' + with assert_raises(ClientError) as ex: + vpc = ec2.create_vpc(CidrBlock=vpc_cidr_block) + str(ex.exception).should.equal( + "An error occurred (InvalidVpc.Range) when calling the CreateVpc " + "operation: The CIDR '{}' is invalid.".format(vpc_cidr_block)) diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 00628e22f..ec0e4e732 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -3,16 +3,19 @@ from __future__ import unicode_literals import hashlib import json from datetime import datetime +from freezegun import freeze_time +import os from random import random import re import sure # noqa import boto3 -from botocore.exceptions import ClientError +from botocore.exceptions import ClientError, ParamValidationError from dateutil.tz import tzlocal from moto import mock_ecr +from nose import SkipTest def _create_image_digest(contents=None): @@ -45,7 +48,8 @@ def _create_image_manifest(): { "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 73109, - "digest": _create_image_digest("layer3") + # randomize image digest + "digest": _create_image_digest() } ] } @@ -198,6 +202,84 @@ def test_put_image(): response['image']['registryId'].should.equal('012345678910') +@mock_ecr +def test_put_image_with_push_date(): + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': + raise SkipTest('Cant manipulate time in server mode') + + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + with freeze_time('2018-08-28 00:00:00'): + image1_date = datetime.now() + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + with freeze_time('2019-05-31 00:00:00'): + image2_date = datetime.now() + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + describe_response = client.describe_images(repositoryName='test_repository') + + type(describe_response['imageDetails']).should.be(list) + len(describe_response['imageDetails']).should.be(2) + + set([describe_response['imageDetails'][0]['imagePushedAt'], + describe_response['imageDetails'][1]['imagePushedAt']]).should.equal(set([image1_date, image2_date])) + + +@mock_ecr +def test_put_image_with_multiple_tags(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + manifest = _create_image_manifest() + response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag='v1' + ) + + response['image']['imageId']['imageTag'].should.equal('v1') + response['image']['imageId']['imageDigest'].should.contain("sha") + response['image']['repositoryName'].should.equal('test_repository') + response['image']['registryId'].should.equal('012345678910') + + response1 = client.put_image( + repositoryName='test_repository', + 
imageManifest=json.dumps(manifest), + imageTag='latest' + ) + + response1['image']['imageId']['imageTag'].should.equal('latest') + response1['image']['imageId']['imageDigest'].should.contain("sha") + response1['image']['repositoryName'].should.equal('test_repository') + response1['image']['registryId'].should.equal('012345678910') + + response2 = client.describe_images(repositoryName='test_repository') + type(response2['imageDetails']).should.be(list) + len(response2['imageDetails']).should.be(1) + + response2['imageDetails'][0]['imageDigest'].should.contain("sha") + + response2['imageDetails'][0]['registryId'].should.equal("012345678910") + + response2['imageDetails'][0]['repositoryName'].should.equal("test_repository") + + len(response2['imageDetails'][0]['imageTags']).should.be(2) + response2['imageDetails'][0]['imageTags'].should.be.equal(['v1', 'latest']) + + @mock_ecr def test_list_images(): client = boto3.client('ecr', region_name='us-east-1') @@ -247,9 +329,31 @@ def test_list_images(): len(response['imageIds']).should.be(1) response['imageIds'][0]['imageTag'].should.equal('oldest') - response = client.list_images(repositoryName='test_repository_2', registryId='109876543210') - type(response['imageIds']).should.be(list) - len(response['imageIds']).should.be(0) + +@mock_ecr +def test_list_images_from_repository_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository_1' + ) + + # non existing repo + error_msg = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + client.list_images.when.called_with( + repositoryName='repo-that-doesnt-exist', + registryId='123', + ).should.throw(Exception, error_msg) + + # repo does not exist in specified registry + error_msg = re.compile( + r".*The repository with name 'test_repository_1' does not exist in the registry with id '222'.*", + re.MULTILINE) + client.list_images.when.called_with( + repositoryName='test_repository_1', + registryId='222', + ).should.throw(Exception, error_msg) @mock_ecr @@ -259,6 +363,11 @@ def test_describe_images(): repositoryName='test_repository' ) + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()) + ) + _ = client.put_image( repositoryName='test_repository', imageManifest=json.dumps(_create_image_manifest()), @@ -279,32 +388,37 @@ def test_describe_images(): response = client.describe_images(repositoryName='test_repository') type(response['imageDetails']).should.be(list) - len(response['imageDetails']).should.be(3) + len(response['imageDetails']).should.be(4) response['imageDetails'][0]['imageDigest'].should.contain("sha") response['imageDetails'][1]['imageDigest'].should.contain("sha") response['imageDetails'][2]['imageDigest'].should.contain("sha") + response['imageDetails'][3]['imageDigest'].should.contain("sha") response['imageDetails'][0]['registryId'].should.equal("012345678910") response['imageDetails'][1]['registryId'].should.equal("012345678910") response['imageDetails'][2]['registryId'].should.equal("012345678910") + response['imageDetails'][3]['registryId'].should.equal("012345678910") response['imageDetails'][0]['repositoryName'].should.equal("test_repository") response['imageDetails'][1]['repositoryName'].should.equal("test_repository") response['imageDetails'][2]['repositoryName'].should.equal("test_repository") + 
response['imageDetails'][3]['repositoryName'].should.equal("test_repository") - len(response['imageDetails'][0]['imageTags']).should.be(1) + response['imageDetails'][0].should_not.have.key('imageTags') len(response['imageDetails'][1]['imageTags']).should.be(1) len(response['imageDetails'][2]['imageTags']).should.be(1) + len(response['imageDetails'][3]['imageTags']).should.be(1) image_tags = ['latest', 'v1', 'v2'] - set([response['imageDetails'][0]['imageTags'][0], - response['imageDetails'][1]['imageTags'][0], - response['imageDetails'][2]['imageTags'][0]]).should.equal(set(image_tags)) + set([response['imageDetails'][1]['imageTags'][0], + response['imageDetails'][2]['imageTags'][0], + response['imageDetails'][3]['imageTags'][0]]).should.equal(set(image_tags)) response['imageDetails'][0]['imageSizeInBytes'].should.equal(52428800) response['imageDetails'][1]['imageSizeInBytes'].should.equal(52428800) response['imageDetails'][2]['imageSizeInBytes'].should.equal(52428800) + response['imageDetails'][3]['imageSizeInBytes'].should.equal(52428800) @mock_ecr @@ -333,6 +447,68 @@ def test_describe_images_by_tag(): image_detail['imageDigest'].should.equal(put_response['imageId']['imageDigest']) +@mock_ecr +def test_describe_images_tags_should_not_contain_empty_tag1(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest) + ) + + tags = ['v1', 'v2', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + len(image_detail['imageTags']).should.equal(3) + image_detail['imageTags'].should.be.equal(tags) + + +@mock_ecr +def test_describe_images_tags_should_not_contain_empty_tag2(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + tags = ['v1', 'v2'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest) + ) + + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag='latest' + ) + + response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + len(image_detail['imageTags']).should.equal(3) + image_detail['imageTags'].should.be.equal(['v1', 'v2', 'latest']) + + @mock_ecr def test_describe_repository_that_doesnt_exist(): client = boto3.client('ecr', region_name='us-east-1') @@ -418,7 +594,7 @@ def test_get_authorization_token_assume_region(): auth_token_response.should.contain('ResponseMetadata') auth_token_response['authorizationData'].should.equal([ { - 'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu', + 'authorizationToken': 'QVdTOjAxMjM0NTY3ODkxMC1hdXRoLXRva2Vu', 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com', 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) }, @@ -428,20 +604,478 @@ def test_get_authorization_token_assume_region(): 
@mock_ecr def test_get_authorization_token_explicit_regions(): client = boto3.client('ecr', region_name='us-east-1') - auth_token_response = client.get_authorization_token(registryIds=['us-east-1', 'us-west-1']) + auth_token_response = client.get_authorization_token(registryIds=['10987654321', '878787878787']) auth_token_response.should.contain('authorizationData') auth_token_response.should.contain('ResponseMetadata') auth_token_response['authorizationData'].should.equal([ { - 'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu', - 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com', + 'authorizationToken': 'QVdTOjEwOTg3NjU0MzIxLWF1dGgtdG9rZW4=', + 'proxyEndpoint': 'https://10987654321.dkr.ecr.us-east-1.amazonaws.com', 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()), }, { - 'authorizationToken': 'QVdTOnVzLXdlc3QtMS1hdXRoLXRva2Vu', - 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-west-1.amazonaws.com', + 'authorizationToken': 'QVdTOjg3ODc4Nzg3ODc4Ny1hdXRoLXRva2Vu', + 'proxyEndpoint': 'https://878787878787.dkr.ecr.us-east-1.amazonaws.com', 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) } ]) + + +@mock_ecr +def test_batch_get_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v2' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(1) + + response['images'][0]['imageManifest'].should.contain("vnd.docker.distribution.manifest.v2+json") + response['images'][0]['registryId'].should.equal("012345678910") + response['images'][0]['repositoryName'].should.equal("test_repository") + + response['images'][0]['imageId']['imageTag'].should.equal("v2") + response['images'][0]['imageId']['imageDigest'].should.contain("sha") + + type(response['failures']).should.be(list) + len(response['failures']).should.be(0) + + +@mock_ecr +def test_batch_get_image_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v5' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(0) + + type(response['failures']).should.be(list) + len(response['failures']).should.be(1) + response['failures'][0]['failureReason'].should.equal("Requested image not found") + response['failures'][0]['failureCode'].should.equal("ImageNotFound") + response['failures'][0]['imageId']['imageTag'].should.equal("v5") + + +@mock_ecr +def 
test_batch_get_image_no_tags(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + error_msg = re.compile( + r".*Missing required parameter in input: \"imageIds\".*", + re.MULTILINE) + + client.batch_get_image.when.called_with( + repositoryName='test_repository').should.throw( + ParamValidationError, error_msg) + + +@mock_ecr +def test_batch_delete_image_by_tag(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'v1.0', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag, + ) + + describe_response1 = client.describe_images(repositoryName='test_repository') + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'latest' + }, + ], + ) + + describe_response2 = client.describe_images(repositoryName='test_repository') + + type(describe_response1['imageDetails'][0]['imageTags']).should.be(list) + len(describe_response1['imageDetails'][0]['imageTags']).should.be(3) + + type(describe_response2['imageDetails'][0]['imageTags']).should.be(list) + len(describe_response2['imageDetails'][0]['imageTags']).should.be(2) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(1) + + batch_delete_response['imageIds'][0]['imageTag'].should.equal("latest") + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(0) + + +@mock_ecr +def test_batch_delete_image_delete_last_tag(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1', + ) + + describe_response1 = client.describe_images(repositoryName='test_repository') + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v1' + }, + ], + ) + + describe_response2 = client.describe_images(repositoryName='test_repository') + + type(describe_response1['imageDetails'][0]['imageTags']).should.be(list) + len(describe_response1['imageDetails'][0]['imageTags']).should.be(1) + + type(describe_response2['imageDetails']).should.be(list) + len(describe_response2['imageDetails']).should.be(0) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(1) + + batch_delete_response['imageIds'][0]['imageTag'].should.equal("v1") + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(0) + + +@mock_ecr +def test_batch_delete_image_with_nonexistent_tag(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'v1.0', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag, + ) + + describe_response = client.describe_images(repositoryName='test_repository') 
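+    # all three tags exist at this point; deleting a missing tag below must fail and leave them intact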
+ + missing_tag = "missing-tag" + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': missing_tag + }, + ], + ) + + type(describe_response['imageDetails'][0]['imageTags']).should.be(list) + len(describe_response['imageDetails'][0]['imageTags']).should.be(3) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(0) + + batch_delete_response['failures'][0]['imageId']['imageTag'].should.equal(missing_tag) + batch_delete_response['failures'][0]['failureCode'].should.equal("ImageNotFound") + batch_delete_response['failures'][0]['failureReason'].should.equal("Requested image not found") + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(1) + + +@mock_ecr +def test_batch_delete_image_by_digest(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'v2', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + describe_response = client.describe_images(repositoryName='test_repository') + image_digest = describe_response['imageDetails'][0]['imageDigest'] + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageDigest': image_digest + }, + ], + ) + + describe_response = client.describe_images(repositoryName='test_repository') + + type(describe_response['imageDetails']).should.be(list) + len(describe_response['imageDetails']).should.be(0) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(3) + + batch_delete_response['imageIds'][0]['imageDigest'].should.equal(image_digest) + batch_delete_response['imageIds'][1]['imageDigest'].should.equal(image_digest) + batch_delete_response['imageIds'][2]['imageDigest'].should.equal(image_digest) + + set([ + batch_delete_response['imageIds'][0]['imageTag'], + batch_delete_response['imageIds'][1]['imageTag'], + batch_delete_response['imageIds'][2]['imageTag']]).should.equal(set(tags)) + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(0) + + +@mock_ecr +def test_batch_delete_image_with_invalid_digest(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'v2', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + invalid_image_digest = 'sha256:invalid-digest' + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageDigest': invalid_image_digest + }, + ], + ) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(0) + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(1) + + batch_delete_response['failures'][0]['imageId']['imageDigest'].should.equal(invalid_image_digest) + batch_delete_response['failures'][0]['failureCode'].should.equal("InvalidImageDigest") + 
batch_delete_response['failures'][0]['failureReason'].should.equal("Invalid request parameters: image digest should satisfy the regex '[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+'") + + +@mock_ecr +def test_batch_delete_image_with_missing_parameters(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + }, + ], + ) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(0) + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(1) + + batch_delete_response['failures'][0]['failureCode'].should.equal("MissingDigestAndTag") + batch_delete_response['failures'][0]['failureReason'].should.equal("Invalid request parameters: both tag and digest cannot be null") + + +@mock_ecr +def test_batch_delete_image_with_matching_digest_and_tag(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'v1.0', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + describe_response = client.describe_images(repositoryName='test_repository') + image_digest = describe_response['imageDetails'][0]['imageDigest'] + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageDigest': image_digest, + 'imageTag': 'v1' + }, + ], + ) + + describe_response = client.describe_images(repositoryName='test_repository') + + type(describe_response['imageDetails']).should.be(list) + len(describe_response['imageDetails']).should.be(0) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(3) + + batch_delete_response['imageIds'][0]['imageDigest'].should.equal(image_digest) + batch_delete_response['imageIds'][1]['imageDigest'].should.equal(image_digest) + batch_delete_response['imageIds'][2]['imageDigest'].should.equal(image_digest) + + set([ + batch_delete_response['imageIds'][0]['imageTag'], + batch_delete_response['imageIds'][1]['imageTag'], + batch_delete_response['imageIds'][2]['imageTag']]).should.equal(set(tags)) + + type(batch_delete_response['failures']).should.be(list) + len(batch_delete_response['failures']).should.be(0) + + +@mock_ecr +def test_batch_delete_image_with_mismatched_digest_and_tag(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + + tags = ['v1', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + describe_response = client.describe_images(repositoryName='test_repository') + image_digest = describe_response['imageDetails'][0]['imageDigest'] + + batch_delete_response = client.batch_delete_image( + registryId='012345678910', + repositoryName='test_repository', + imageIds=[ + { + 'imageDigest': image_digest, + 'imageTag': 'v2' + }, + ], + ) + + type(batch_delete_response['imageIds']).should.be(list) + len(batch_delete_response['imageIds']).should.be(0) + + type(batch_delete_response['failures']).should.be(list) + 
len(batch_delete_response['failures']).should.be(1) + + batch_delete_response['failures'][0]['imageId']['imageDigest'].should.equal(image_digest) + batch_delete_response['failures'][0]['imageId']['imageTag'].should.equal("v2") + batch_delete_response['failures'][0]['failureCode'].should.equal("ImageNotFound") + batch_delete_response['failures'][0]['failureReason'].should.equal("Requested image not found") diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 9e5e4ff08..b147c4159 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -2,13 +2,14 @@ from __future__ import unicode_literals from copy import deepcopy +from botocore.exceptions import ClientError import boto3 import sure # noqa import json from moto.ec2 import utils as ec2_utils from uuid import UUID -from moto import mock_cloudformation +from moto import mock_cloudformation, mock_elbv2 from moto import mock_ecs from moto import mock_ec2 from nose.tools import assert_raises @@ -46,6 +47,15 @@ def test_list_clusters(): 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') +@mock_ecs +def test_describe_clusters(): + client = boto3.client('ecs', region_name='us-east-1') + response = client.describe_clusters(clusters=["some-cluster"]) + response['failures'].should.contain({ + 'arn': 'arn:aws:ecs:us-east-1:012345678910:cluster/some-cluster', + 'reason': 'MISSING' + }) + @mock_ecs def test_delete_cluster(): client = boto3.client('ecs', region_name='us-east-1') @@ -303,6 +313,52 @@ def test_create_service(): response['service']['status'].should.equal('ACTIVE') response['service']['taskDefinition'].should.equal( 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['schedulingStrategy'].should.equal('REPLICA') + +@mock_ecs +def test_create_service_scheduling_strategy(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2, + schedulingStrategy='DAEMON', + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(0) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['schedulingStrategy'].should.equal('DAEMON') @mock_ecs @@ -332,23 +388,32 @@ def test_list_services(): cluster='test_ecs_cluster', serviceName='test_ecs_service1', taskDefinition='test_ecs_task', + schedulingStrategy='REPLICA', desiredCount=2 ) _ 
= client.create_service( cluster='test_ecs_cluster', serviceName='test_ecs_service2', taskDefinition='test_ecs_task', + schedulingStrategy='DAEMON', desiredCount=2 ) - response = client.list_services( + unfiltered_response = client.list_services( cluster='test_ecs_cluster' ) - len(response['serviceArns']).should.equal(2) - response['serviceArns'][0].should.equal( + len(unfiltered_response['serviceArns']).should.equal(2) + unfiltered_response['serviceArns'][0].should.equal( 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['serviceArns'][1].should.equal( + unfiltered_response['serviceArns'][1].should.equal( 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + filtered_response = client.list_services( + cluster='test_ecs_cluster', + schedulingStrategy='REPLICA' + ) + len(filtered_response['serviceArns']).should.equal(1) + filtered_response['serviceArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') @mock_ecs def test_describe_services(): @@ -410,6 +475,72 @@ def test_describe_services(): response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') +@mock_ecs +def test_describe_services_scheduling_strategy(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service1', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service2', + taskDefinition='test_ecs_task', + desiredCount=2, + schedulingStrategy='DAEMON' + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service3', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response = client.describe_services( + cluster='test_ecs_cluster', + services=['test_ecs_service1', + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2', + 'test_ecs_service3'] + ) + len(response['services']).should.equal(3) + response['services'][0]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['services'][0]['serviceName'].should.equal('test_ecs_service1') + response['services'][1]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + response['services'][1]['serviceName'].should.equal('test_ecs_service2') + + response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) + response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) + response['services'][0]['deployments'][0]['runningCount'].should.equal(0) + response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') + + response['services'][0]['schedulingStrategy'].should.equal('REPLICA') + response['services'][1]['schedulingStrategy'].should.equal('DAEMON') + response['services'][2]['schedulingStrategy'].should.equal('REPLICA') + + @mock_ecs def test_update_service(): client = boto3.client('ecs', region_name='us-east-1') @@ -448,6 +579,22 @@ def test_update_service(): desiredCount=0 ) response['service']['desiredCount'].should.equal(0) + 
response['service']['schedulingStrategy'].should.equal('REPLICA') + + +@mock_ecs +def test_update_missing_service(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + + client.update_service.when.called_with( + cluster='test_ecs_cluster', + service='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=0 + ).should.throw(ClientError) @mock_ecs @@ -499,10 +646,27 @@ def test_delete_service(): 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') response['service']['serviceName'].should.equal('test_ecs_service') response['service']['status'].should.equal('ACTIVE') + response['service']['schedulingStrategy'].should.equal('REPLICA') response['service']['taskDefinition'].should.equal( 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') +@mock_ecs +def test_update_non_existant_service(): + client = boto3.client('ecs', region_name='us-east-1') + try: + client.update_service( + cluster="my-clustet", + service="my-service", + desiredCount=0, + ) + except ClientError as exc: + error_code = exc.response['Error']['Code'] + error_code.should.equal('ServiceNotFoundException') + else: + raise Exception("Didn't raise ClientError") + + @mock_ec2 @mock_ecs def test_register_container_instance(): @@ -664,7 +828,7 @@ def test_list_container_instances(): instanceIdentityDocument=instance_id_document) test_instance_arns.append(response['containerInstance'][ - 'containerInstanceArn']) + 'containerInstanceArn']) response = ecs_client.list_container_instances(cluster=test_cluster_name) @@ -702,7 +866,7 @@ def test_describe_container_instances(): instanceIdentityDocument=instance_id_document) test_instance_arns.append(response['containerInstance'][ - 'containerInstanceArn']) + 'containerInstanceArn']) test_instance_ids = list( map((lambda x: x.split('/')[1]), test_instance_arns)) @@ -779,6 +943,65 @@ def test_update_container_instances_state(): status='test_status').should.throw(Exception) +@mock_ec2 +@mock_ecs +def test_update_container_instances_state_by_arn(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + instance_to_create = 3 + test_instance_arns = [] + for i in range(0, instance_to_create): + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document) + + test_instance_arns.append(response['containerInstance']['containerInstanceArn']) + + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, + containerInstances=test_instance_arns, + status='DRAINING') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('DRAINING') + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, + containerInstances=test_instance_arns, + status='DRAINING') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in 
response['containerInstances']] + for status in response_statuses: + status.should.equal('DRAINING') + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, + containerInstances=test_instance_arns, + status='ACTIVE') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('ACTIVE') + ecs_client.update_container_instances_state.when.called_with(cluster=test_cluster_name, + containerInstances=test_instance_arns, + status='test_status').should.throw(Exception) + + @mock_ec2 @mock_ecs def test_run_task(): @@ -1052,7 +1275,14 @@ def test_describe_tasks(): len(response['tasks']).should.equal(2) set([response['tasks'][0]['taskArn'], response['tasks'] - [1]['taskArn']]).should.equal(set(tasks_arns)) + [1]['taskArn']]).should.equal(set(tasks_arns)) + + # Test we can pass task ids instead of ARNs + response = client.describe_tasks( + cluster='test_ecs_cluster', + tasks=[tasks_arns[0].split("/")[-1]] + ) + len(response['tasks']).should.equal(1) @mock_ecs @@ -1208,7 +1438,8 @@ def test_resource_reservation_and_release(): cluster='test_ecs_cluster', containerInstances=[container_instance_arn] )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources, registered_resources = _fetch_container_instance_resources( + container_instance_description) remaining_resources['CPU'].should.equal(registered_resources['CPU'] - 1024) remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) registered_resources['PORTS'].append('80') @@ -1223,6 +1454,87 @@ def test_resource_reservation_and_release(): cluster='test_ecs_cluster', containerInstances=[container_instance_arn] )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources( + container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(0) + +@mock_ec2 +@mock_ecs +def test_resource_reservation_and_release_memory_reservation(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + _ = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'memoryReservation': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'}, + 'portMappings': [ + { + 'containerPort': 8080 + } + ] + } + ] + ) + run_response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=1, + 
startedBy='moto' + ) + container_instance_arn = run_response['tasks'][0].get('containerInstanceArn') + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(1) + client.stop_task( + cluster='test_ecs_cluster', + task=run_response['tasks'][0].get('taskArn'), + reason='moto testing' + ) + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) remaining_resources['CPU'].should.equal(registered_resources['CPU']) remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) @@ -1230,6 +1542,7 @@ def test_resource_reservation_and_release(): container_instance_description['runningTasksCount'].should.equal(0) + @mock_ecs @mock_cloudformation def test_create_cluster_through_cloudformation(): @@ -1246,6 +1559,36 @@ def test_create_cluster_through_cloudformation(): } } template_json = json.dumps(template) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(0) + + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation_no_name(): + # cloudformation should create a cluster name for you if you do not provide it + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + } + } + } + template_json = json.dumps(template) cfn_conn = boto3.client('cloudformation', region_name='us-west-1') cfn_conn.create_stack( StackName="test_stack", @@ -1319,15 +1662,20 @@ def test_create_task_definition_through_cloudformation(): } template_json = json.dumps(template) cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + stack_name = 'test_stack' cfn_conn.create_stack( - StackName="test_stack", + StackName=stack_name, TemplateBody=template_json, ) ecs_conn = boto3.client('ecs', region_name='us-west-1') resp = ecs_conn.list_task_definitions() len(resp['taskDefinitionArns']).should.equal(1) + task_definition_arn = resp['taskDefinitionArns'][0] + task_definition_details = cfn_conn.describe_stack_resource( + StackName=stack_name,LogicalResourceId='testTaskDefinition')['StackResourceDetail'] + task_definition_details['PhysicalResourceId'].should.equal(task_definition_arn) @mock_ec2 @mock_ecs @@ -1624,11 +1972,13 @@ def test_attributes(): clusterName=test_cluster_name ) + instances = [] test_instance = ec2.create_instances( ImageId="ami-1234abcd", MinCount=1, MaxCount=1, )[0] + 
instances.append(test_instance) instance_id_document = json.dumps( ec2_utils.generate_instance_identity_document(test_instance) @@ -1648,6 +1998,7 @@ def test_attributes(): MinCount=1, MaxCount=1, )[0] + instances.append(test_instance) instance_id_document = json.dumps( ec2_utils.generate_instance_identity_document(test_instance) @@ -1671,7 +2022,8 @@ def test_attributes(): attributes=[ {'name': 'env', 'value': 'prod'}, {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1}, - {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'} + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, + 'targetType': 'container-instance'} ] ) @@ -1680,25 +2032,31 @@ def test_attributes(): targetType='container-instance' ) attrs = resp['attributes'] - len(attrs).should.equal(4) + + NUM_CUSTOM_ATTRIBUTES = 4 # 2 specific to individual machines and 1 global, going to both machines (2 + 1*2) + NUM_DEFAULT_ATTRIBUTES = 4 + len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances))) # Tests that the attrs have been set properly len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2) - len(list(filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1) + len(list( + filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1) ecs_client.delete_attributes( cluster=test_cluster_name, attributes=[ - {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'} + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, + 'targetType': 'container-instance'} ] ) + NUM_CUSTOM_ATTRIBUTES -= 1 resp = ecs_client.list_attributes( cluster=test_cluster_name, targetType='container-instance' ) attrs = resp['attributes'] - len(attrs).should.equal(3) + len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances))) @mock_ecs @@ -1757,15 +2115,192 @@ def test_list_task_definition_families(): len(resp2['families']).should.equal(1) +@mock_ec2 +@mock_ecs +def test_default_container_instance_attributes(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + # Create cluster and EC2 instance + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + # Register container instance + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn = response['containerInstance']['containerInstanceArn'] + container_instance_id = full_arn.rsplit('/', 1)[-1] + + default_attributes = response['containerInstance']['attributes'] + assert len(default_attributes) == 4 + expected_result = [ + {'name': 'ecs.availability-zone', 'value': test_instance.placement['AvailabilityZone']}, + {'name': 'ecs.ami-id', 'value': test_instance.image_id}, + {'name': 'ecs.instance-type', 'value': test_instance.instance_type}, + {'name': 'ecs.os-type', 'value': test_instance.platform or 'linux'} + ] + assert sorted(default_attributes, key=lambda item: item['name']) == sorted(expected_result, + key=lambda 
item: item['name']) + + +@mock_ec2 +@mock_ecs +def test_describe_container_instances_with_attributes(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + # Create cluster and EC2 instance + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + # Register container instance + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn = response['containerInstance']['containerInstanceArn'] + container_instance_id = full_arn.rsplit('/', 1)[-1] + default_attributes = response['containerInstance']['attributes'] + + # Set attributes on container instance, one without a value + attributes = [ + {'name': 'env', 'value': 'prod'}, + {'name': 'attr1', 'value': 'instance1', 'targetId': container_instance_id, + 'targetType': 'container-instance'}, + {'name': 'attr_without_value'} + ] + ecs_client.put_attributes( + cluster=test_cluster_name, + attributes=attributes + ) + + # Describe container instance, should have attributes previously set + described_instance = ecs_client.describe_container_instances(cluster=test_cluster_name, + containerInstances=[container_instance_id]) + + assert len(described_instance['containerInstances']) == 1 + assert isinstance(described_instance['containerInstances'][0]['attributes'], list) + + # Remove additional info passed to put_attributes + cleaned_attributes = [] + for attribute in attributes: + attribute.pop('targetId', None) + attribute.pop('targetType', None) + cleaned_attributes.append(attribute) + described_attributes = sorted(described_instance['containerInstances'][0]['attributes'], + key=lambda item: item['name']) + expected_attributes = sorted(default_attributes + cleaned_attributes, key=lambda item: item['name']) + assert described_attributes == expected_attributes + + def _fetch_container_instance_resources(container_instance_description): remaining_resources = {} registered_resources = {} remaining_resources_list = container_instance_description['remainingResources'] registered_resources_list = container_instance_description['registeredResources'] - remaining_resources['CPU'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'CPU'][0] - remaining_resources['MEMORY'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'MEMORY'][0] - remaining_resources['PORTS'] = [x['stringSetValue'] for x in remaining_resources_list if x['name'] == 'PORTS'][0] - registered_resources['CPU'] = [x['integerValue'] for x in registered_resources_list if x['name'] == 'CPU'][0] - registered_resources['MEMORY'] = [x['integerValue'] for x in registered_resources_list if x['name'] == 'MEMORY'][0] - registered_resources['PORTS'] = [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] + remaining_resources['CPU'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'CPU'][ + 0] + remaining_resources['MEMORY'] = \ + [x['integerValue'] for x in remaining_resources_list if x['name'] == 'MEMORY'][0] + remaining_resources['PORTS'] = \ + [x['stringSetValue'] for x in remaining_resources_list if x['name'] == 
'PORTS'][0] + registered_resources['CPU'] = \ + [x['integerValue'] for x in registered_resources_list if x['name'] == 'CPU'][0] + registered_resources['MEMORY'] = \ + [x['integerValue'] for x in registered_resources_list if x['name'] == 'MEMORY'][0] + registered_resources['PORTS'] = \ + [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] return remaining_resources, registered_resources + + +@mock_ecs +def test_create_service_load_balancing(): + client = boto3.client('ecs', region_name='us-east-1') + client.create_cluster( + clusterName='test_ecs_cluster' + ) + client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2, + loadBalancers=[ + { + 'targetGroupArn': 'test_target_group_arn', + 'loadBalancerName': 'test_load_balancer_name', + 'containerName': 'test_container_name', + 'containerPort': 123 + } + ] + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(1) + response['service']['loadBalancers'][0]['targetGroupArn'].should.equal( + 'test_target_group_arn') + response['service']['loadBalancers'][0]['loadBalancerName'].should.equal( + 'test_load_balancer_name') + response['service']['loadBalancers'][0]['containerName'].should.equal( + 'test_container_name') + response['service']['loadBalancers'][0]['containerPort'].should.equal(123) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 5827e70c7..447896f15 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -21,7 +21,7 @@ from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated @mock_ec2_deprecated def test_create_load_balancer(): conn = boto.connect_elb() - ec2 = boto.connect_ec2('the_key', 'the_secret') + ec2 = boto.ec2.connect_to_region("us-east-1") security_group = ec2.create_security_group('sg-abc987', 'description') @@ -723,6 +723,40 @@ def test_describe_instance_health(): instances_health[0].state.should.equal('InService') +@mock_ec2 +@mock_elb +def test_describe_instance_health_boto3(): + elb = boto3.client('elb', region_name="us-east-1") + ec2 = boto3.client('ec2', region_name="us-east-1") + instances = ec2.run_instances(MinCount=2, MaxCount=2)['Instances'] + lb_name = "my_load_balancer" + elb.create_load_balancer( + Listeners=[{ + 'InstancePort': 80, + 'LoadBalancerPort': 8080, + 'Protocol': 'HTTP' + }], + LoadBalancerName=lb_name, + ) + elb.register_instances_with_load_balancer( + LoadBalancerName=lb_name, 
+ Instances=[{'InstanceId': instances[0]['InstanceId']}] + ) + instances_health = elb.describe_instance_health( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instance['InstanceId']} for instance in instances] + ) + instances_health['InstanceStates'].should.have.length_of(2) + instances_health['InstanceStates'][0]['InstanceId'].\ + should.equal(instances[0]['InstanceId']) + instances_health['InstanceStates'][0]['State'].\ + should.equal('InService') + instances_health['InstanceStates'][1]['InstanceId'].\ + should.equal(instances[1]['InstanceId']) + instances_health['InstanceStates'][1]['State'].\ + should.equal('Unknown') + + @mock_elb def test_add_remove_tags(): client = boto3.client('elb', region_name='us-east-1') diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 4f0b1a9cd..879a04cd8 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals + +import json import os import boto3 import botocore @@ -6,7 +8,7 @@ from botocore.exceptions import ClientError from nose.tools import assert_raises import sure # noqa -from moto import mock_elbv2, mock_ec2, mock_acm +from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation from moto.elbv2 import elbv2_backends @@ -25,7 +27,7 @@ def test_create_load_balancer(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -67,7 +69,7 @@ def test_describe_load_balancers(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') conn.create_load_balancer( @@ -110,7 +112,7 @@ def test_add_remove_tags(): vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') subnet1 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, @@ -232,7 +234,7 @@ def test_create_elb_in_multiple_region(): InstanceTenancy='default') subnet1 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone=region + 'a') subnet2 = ec2.create_subnet( VpcId=vpc.id, @@ -273,7 +275,7 @@ def test_create_target_group_and_listeners(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -340,6 +342,10 @@ def test_create_target_group_and_listeners(): 'Type': 'forward'}]) http_listener_arn = listener.get('ListenerArn') + response = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn, + Names=['a-target']) + response.get('TargetGroups').should.have.length_of(1) + # And another with SSL response = conn.create_listener( LoadBalancerArn=load_balancer_arn, @@ -413,6 +419,45 @@ def test_create_target_group_and_listeners(): response.get('TargetGroups').should.have.length_of(0) +@mock_elbv2 +@mock_ec2 +def test_create_target_group_without_non_required_parameters(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( 
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.0/26',
+        AvailabilityZone='us-east-1b')
+
+    response = conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    # request without the HealthCheckIntervalSeconds parameter,
+    # which defaults to 30 seconds
+    response = conn.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080'
+    )
+    target_group = response.get('TargetGroups')[0]
+    target_group.should_not.be.none
+
+
 @mock_elbv2
 @mock_ec2
 def test_create_invalid_target_group():
@@ -493,7 +538,7 @@ def test_describe_paginated_balancers():
         AvailabilityZone='us-east-1a')
     subnet2 = ec2.create_subnet(
         VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
         AvailabilityZone='us-east-1b')
 
     for i in range(51):
@@ -528,7 +573,7 @@ def test_delete_load_balancer():
         AvailabilityZone='us-east-1a')
     subnet2 = ec2.create_subnet(
         VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
         AvailabilityZone='us-east-1b')
 
     response = conn.create_load_balancer(
@@ -561,7 +606,7 @@ def test_register_targets():
         AvailabilityZone='us-east-1a')
     subnet2 = ec2.create_subnet(
         VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
         AvailabilityZone='us-east-1b')
 
     conn.create_load_balancer(
@@ -622,6 +667,91 @@ def test_register_targets():
     response.get('TargetHealthDescriptions').should.have.length_of(1)
 
 
+@mock_ec2
+@mock_elbv2
+def test_stopped_instance_target():
+    target_group_port = 8080
+
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.0/26',
+        AvailabilityZone='us-east-1b')
+
+    conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    response = conn.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=target_group_port,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+    target_group = response.get('TargetGroups')[0]
+
+    # No targets registered yet
+    response = conn.describe_target_health(
+        TargetGroupArn=target_group.get('TargetGroupArn'))
+    response.get('TargetHealthDescriptions').should.have.length_of(0)
+
+    response = ec2.create_instances(
+        ImageId='ami-1234abcd', MinCount=1, MaxCount=1)
+    instance = response[0]
+
+    target_dict = {
+        'Id': instance.id,
+        'Port': 500
+    }
+
+    response = conn.register_targets(
+        TargetGroupArn=target_group.get('TargetGroupArn'),
+        Targets=[target_dict])
+
+    response = conn.describe_target_health(
+        TargetGroupArn=target_group.get('TargetGroupArn'))
+    response.get('TargetHealthDescriptions').should.have.length_of(1)
+    target_health_description = response.get('TargetHealthDescriptions')[0]
+
+
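+    # while the instance is running its target reports 'healthy'; stopping it
+    # below should flip the state to 'unused' with reason Target.InvalidState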
target_health_description['Target'].should.equal(target_dict) + target_health_description['HealthCheckPort'].should.equal(str(target_group_port)) + target_health_description['TargetHealth'].should.equal({ + 'State': 'healthy' + }) + + instance.stop() + + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(1) + target_health_description = response.get('TargetHealthDescriptions')[0] + target_health_description['Target'].should.equal(target_dict) + target_health_description['HealthCheckPort'].should.equal(str(target_group_port)) + target_health_description['TargetHealth'].should.equal({ + 'State': 'unused', + 'Reason': 'Target.InvalidState', + 'Description': 'Target is in the stopped state' + }) + + @mock_ec2 @mock_elbv2 def test_target_group_attributes(): @@ -637,7 +767,7 @@ def test_target_group_attributes(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -728,7 +858,7 @@ def test_handle_listener_rules(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -1033,7 +1163,7 @@ def test_describe_invalid_target_group(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = conn.create_load_balancer( @@ -1064,6 +1194,50 @@ def test_describe_invalid_target_group(): conn.describe_target_groups(Names=['invalid']) +@mock_elbv2 +@mock_ec2 +def test_describe_target_groups_no_arguments(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.0/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response.get('LoadBalancers')[0].get('LoadBalancerArn') + + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + + assert len(conn.describe_target_groups()['TargetGroups']) == 1 + + @mock_elbv2 def test_describe_account_limits(): client = boto3.client('elbv2', region_name='eu-central-1') @@ -1099,7 +1273,7 @@ def test_set_ip_address_type(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b') response = client.create_load_balancer( @@ -1149,7 +1323,7 @@ def test_set_security_groups(): AvailabilityZone='us-east-1a') subnet2 = ec2.create_subnet( VpcId=vpc.id, - CidrBlock='172.28.7.192/26', + 
CidrBlock='172.28.7.0/26',
         AvailabilityZone='us-east-1b')
 
     response = client.create_load_balancer(
@@ -1186,11 +1360,11 @@ def test_set_subnets():
     vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
     subnet1 = ec2.create_subnet(
         VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
         AvailabilityZone='us-east-1a')
     subnet2 = ec2.create_subnet(
         VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.64/26',
         AvailabilityZone='us-east-1b')
     subnet3 = ec2.create_subnet(
         VpcId=vpc.id,
@@ -1243,7 +1417,7 @@ def test_set_subnets():
         AvailabilityZone='us-east-1a')
     subnet2 = ec2.create_subnet(
         VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
         AvailabilityZone='us-east-1b')
 
     response = client.create_load_balancer(
@@ -1332,7 +1506,7 @@ def test_modify_listener_http_to_https():
         AvailabilityZone='eu-central-1a')
     subnet2 = ec2.create_subnet(
         VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
+        CidrBlock='172.28.7.0/26',
         AvailabilityZone='eu-central-1b')
 
     response = client.create_load_balancer(
@@ -1432,3 +1606,208 @@ def test_modify_listener_http_to_https():
             {'Type': 'forward', 'TargetGroupArn': target_group_arn}
         ]
     )
+
+
+@mock_ec2
+@mock_elbv2
+@mock_cloudformation
+def test_create_target_groups_through_cloudformation():
+    cfn_conn = boto3.client('cloudformation', region_name='us-east-1')
+    elbv2_client = boto3.client('elbv2', region_name='us-east-1')
+
+    # test that setting a name manually as well as letting cloudformation create a name both work
+    # this is a special case because target groups have a name length limit of 22 characters, and must be unique
+    # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name
+    template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Description": "ECS Cluster Test CloudFormation",
+        "Resources": {
+            "testVPC": {
+                "Type": "AWS::EC2::VPC",
+                "Properties": {
+                    "CidrBlock": "10.0.0.0/16",
+                },
+            },
+            "testGroup1": {
+                "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
+                "Properties": {
+                    "Port": 80,
+                    "Protocol": "HTTP",
+                    "VpcId": {"Ref": "testVPC"},
+                },
+            },
+            "testGroup2": {
+                "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
+                "Properties": {
+                    "Port": 90,
+                    "Protocol": "HTTP",
+                    "VpcId": {"Ref": "testVPC"},
+                },
+            },
+            "testGroup3": {
+                "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
+                "Properties": {
+                    "Name": "MyTargetGroup",
+                    "Port": 70,
+                    "Protocol": "HTTPS",
+                    "VpcId": {"Ref": "testVPC"},
+                },
+            },
+        }
+    }
+    template_json = json.dumps(template)
+    cfn_conn.create_stack(
+        StackName="test-stack",
+        TemplateBody=template_json,
+    )
+
+    describe_target_groups_response = elbv2_client.describe_target_groups()
+    target_group_dicts = describe_target_groups_response['TargetGroups']
+    assert len(target_group_dicts) == 3
+
+    # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12)
+    # and one named MyTargetGroup
+    assert len([tg for tg in target_group_dicts if tg['TargetGroupName'] == 'MyTargetGroup']) == 1
+    assert len(
+        [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')]
+    ) == 2
+
+
+@mock_elbv2
+@mock_ec2
+def test_redirect_action_listener_rule():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.128/26',
+        AvailabilityZone='us-east-1b')
+
+    response = conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
+
+    response = conn.create_listener(LoadBalancerArn=load_balancer_arn,
+                                    Protocol='HTTP',
+                                    Port=80,
+                                    DefaultActions=[
+                                        {'Type': 'redirect',
+                                         'RedirectConfig': {
+                                             'Protocol': 'HTTPS',
+                                             'Port': '443',
+                                             'StatusCode': 'HTTP_301'
+                                         }}])
+
+    listener = response.get('Listeners')[0]
+    expected_default_actions = [{
+        'Type': 'redirect',
+        'RedirectConfig': {
+            'Protocol': 'HTTPS',
+            'Port': '443',
+            'StatusCode': 'HTTP_301'
+        }
+    }]
+    listener.get('DefaultActions').should.equal(expected_default_actions)
+    listener_arn = listener.get('ListenerArn')
+
+    describe_rules_response = conn.describe_rules(ListenerArn=listener_arn)
+    describe_rules_response['Rules'][0]['Actions'].should.equal(expected_default_actions)
+
+    describe_listener_response = conn.describe_listeners(ListenerArns=[listener_arn, ])
+    describe_listener_actions = describe_listener_response['Listeners'][0]['DefaultActions']
+    describe_listener_actions.should.equal(expected_default_actions)
+
+    modify_listener_response = conn.modify_listener(ListenerArn=listener_arn, Port=81)
+    modify_listener_actions = modify_listener_response['Listeners'][0]['DefaultActions']
+    modify_listener_actions.should.equal(expected_default_actions)
+
+
+@mock_elbv2
+@mock_cloudformation
+def test_redirect_action_listener_rule_cloudformation():
+    cfn_conn = boto3.client('cloudformation', region_name='us-east-1')
+    elbv2_client = boto3.client('elbv2', region_name='us-east-1')
+
+    template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Description": "ECS Cluster Test CloudFormation",
+        "Resources": {
+            "testVPC": {
+                "Type": "AWS::EC2::VPC",
+                "Properties": {
+                    "CidrBlock": "10.0.0.0/16",
+                },
+            },
+            "subnet1": {
+                "Type": "AWS::EC2::Subnet",
+                "Properties": {
+                    "CidrBlock": "10.0.0.0/24",
+                    "VpcId": {"Ref": "testVPC"},
+                    "AvailabilityZone": "us-east-1a",
+                },
+            },
+            "subnet2": {
+                "Type": "AWS::EC2::Subnet",
+                "Properties": {
+                    "CidrBlock": "10.0.1.0/24",
+                    "VpcId": {"Ref": "testVPC"},
+                    "AvailabilityZone": "us-east-1b",
+                },
+            },
+            "testLb": {
+                "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
+                "Properties": {
+                    "Name": "my-lb",
+                    "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}],
+                    "Type": "application",
+                    "SecurityGroups": [],
+                }
+            },
+            "testListener": {
+                "Type": "AWS::ElasticLoadBalancingV2::Listener",
+                "Properties": {
+                    "LoadBalancerArn": {"Ref": "testLb"},
+                    "Port": 80,
+                    "Protocol": "HTTP",
+                    "DefaultActions": [{
+                        "Type": "redirect",
+                        "RedirectConfig": {
+                            "Port": "443",
+                            "Protocol": "HTTPS",
+                            "StatusCode": "HTTP_301",
+                        }
+                    }]
+                }
+
+            }
+        }
+    }
+    template_json = json.dumps(template)
+    cfn_conn.create_stack(StackName="test-stack", TemplateBody=template_json)
+
+    describe_load_balancers_response = elbv2_client.describe_load_balancers(Names=['my-lb',])
+    describe_load_balancers_response['LoadBalancers'].should.have.length_of(1)
+    load_balancer_arn = describe_load_balancers_response['LoadBalancers'][0]['LoadBalancerArn']
+
+    describe_listeners_response =
elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn) + + describe_listeners_response['Listeners'].should.have.length_of(1) + describe_listeners_response['Listeners'][0]['DefaultActions'].should.equal([{ + 'Type': 'redirect', + 'RedirectConfig': { + 'Port': '443', 'Protocol': 'HTTPS', 'StatusCode': 'HTTP_301', + } + },]) diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py index 4acd7067c..505c69b11 100644 --- a/tests/test_emr/test_emr.py +++ b/tests/test_emr/test_emr.py @@ -443,7 +443,7 @@ def test_bootstrap_actions(): BootstrapAction( name='bs1', path='path/to/script', - bootstrap_action_args=['arg1', 'arg2']), + bootstrap_action_args=['arg1', 'arg2&arg3']), BootstrapAction( name='bs2', path='path/to/anotherscript', @@ -551,7 +551,7 @@ def test_steps(): input='s3n://elasticmapreduce/samples/wordcount/input', output='s3n://output_bucket/output/wordcount_output'), StreamingStep( - name='My wordcount example2', + name='My wordcount example & co.', mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py', reducer='aggregate', input='s3n://elasticmapreduce/samples/wordcount/input2', diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 237ff8bba..b9a5025d9 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -432,6 +432,47 @@ def test_run_job_flow_with_instance_groups(): x['BidPrice'].should.equal(y['BidPrice']) +@mock_emr +def test_run_job_flow_with_custom_ami(): + client = boto3.client('emr', region_name='us-east-1') + + with assert_raises(ClientError) as ex: + # CustomAmiId available in Amazon EMR 5.7.0 and later + args = deepcopy(run_job_flow_args) + args['CustomAmiId'] = 'MyEmrCustomId' + args['ReleaseLabel'] = 'emr-5.6.0' + client.run_job_flow(**args) + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['Error']['Message'].should.equal('Custom AMI is not allowed') + + with assert_raises(ClientError) as ex: + args = deepcopy(run_job_flow_args) + args['CustomAmiId'] = 'MyEmrCustomId' + args['AmiVersion'] = '3.8.1' + client.run_job_flow(**args) + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['Error']['Message'].should.equal( + 'Custom AMI is not supported in this version of EMR') + + with assert_raises(ClientError) as ex: + # AMI version and release label exception raises before CustomAmi exception + args = deepcopy(run_job_flow_args) + args['CustomAmiId'] = 'MyEmrCustomId' + args['ReleaseLabel'] = 'emr-5.6.0' + args['AmiVersion'] = '3.8.1' + client.run_job_flow(**args) + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['Error']['Message'].should.contain( + 'Only one AMI version and release label may be specified.') + + args = deepcopy(run_job_flow_args) + args['CustomAmiId'] = 'MyEmrCustomAmi' + args['ReleaseLabel'] = 'emr-5.7.0' + cluster_id = client.run_job_flow(**args)['JobFlowId'] + resp = client.describe_cluster(ClusterId=cluster_id) + resp['Cluster']['CustomAmiId'].should.equal('MyEmrCustomAmi') + + @mock_emr def test_set_termination_protection(): client = boto3.client('emr', region_name='us-east-1') diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index e839bde5b..a9d90ec32 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -1,12 +1,11 @@ import random - import boto3 +import json from moto.events import mock_events from botocore.exceptions import ClientError 
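One behavioral detail worth calling out before the event-bus permission tests below: `describe_event_bus` returns the resource policy as a JSON *string*, not a parsed dict, so assertions have to `json.loads` it first. A minimal sketch of that pattern, mirroring the calls made in these tests:

```python
import json

import boto3
from moto.events import mock_events


@mock_events
def example_policy_is_a_json_string():
    client = boto3.client('events', 'eu-central-1')
    client.put_permission(Action='events:PutEvents',
                          Principal='111111111111', StatementId='Account1')
    # Policy arrives as a JSON string and must be parsed before inspection.
    policy = json.loads(client.describe_event_bus()['Policy'])
    assert policy['Statement'][0]['Sid'] == 'Account1'
```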
from nose.tools import assert_raises
-
 RULES = [
     {'Name': 'test1', 'ScheduleExpression': 'rate(5 minutes)'},
     {'Name': 'test2', 'ScheduleExpression': 'rate(1 minute)'},
@@ -108,6 +107,12 @@ def test_enable_disable_rule():
     rule = client.describe_rule(Name=rule_name)
     assert(rule['State'] == 'ENABLED')
+    # Test invalid name
+    with assert_raises(ClientError) as ce:
+        client.enable_rule(Name='junk')
+
+    assert ce.exception.response['Error']['Code'] == 'ResourceNotFoundException'
+

 @mock_events
 def test_list_rule_names_by_target():
@@ -177,17 +183,19 @@ def test_remove_targets():
 def test_permissions():
     client = boto3.client('events', 'eu-central-1')

-    client.put_permission(Action='PutEvents', Principal='111111111111', StatementId='Account1')
-    client.put_permission(Action='PutEvents', Principal='222222222222', StatementId='Account2')
+    client.put_permission(Action='events:PutEvents', Principal='111111111111', StatementId='Account1')
+    client.put_permission(Action='events:PutEvents', Principal='222222222222', StatementId='Account2')

     resp = client.describe_event_bus()
-    assert len(resp['Policy']['Statement']) == 2
+    resp_policy = json.loads(resp['Policy'])
+    assert len(resp_policy['Statement']) == 2

     client.remove_permission(StatementId='Account2')

     resp = client.describe_event_bus()
-    assert len(resp['Policy']['Statement']) == 1
-    assert resp['Policy']['Statement'][0]['Sid'] == 'Account1'
+    resp_policy = json.loads(resp['Policy'])
+    assert len(resp_policy['Statement']) == 1
+    assert resp_policy['Statement'][0]['Sid'] == 'Account1'

 @mock_events
diff --git a/tests/test_glacier/test_glacier_jobs.py b/tests/test_glacier/test_glacier_jobs.py
index 66780f681..152aa14c8 100644
--- a/tests/test_glacier/test_glacier_jobs.py
+++ b/tests/test_glacier/test_glacier_jobs.py
@@ -1,6 +1,7 @@
 from __future__ import unicode_literals

 import json
+import time

 from boto.glacier.layer1 import Layer1
 import sure  # noqa
@@ -39,24 +40,11 @@ def test_describe_job():
     job_id = job_response['JobId']

     job = conn.describe_job(vault_name, job_id)
-    json.loads(job.read().decode("utf-8")).should.equal({
-        'CompletionDate': '2013-03-20T17:03:43.221Z',
-        'VaultARN': None,
-        'RetrievalByteRange': None,
-        'SHA256TreeHash': None,
-        'Completed': True,
-        'InventorySizeInBytes': '0',
-        'JobId': job_id,
-        'Action': 'InventoryRetrieval',
-        'JobDescription': None,
-        'SNSTopic': None,
-        'ArchiveSizeInBytes': 0,
-        'ArchiveId': archive_id,
-        'ArchiveSHA256TreeHash': None,
-        'CreationDate': '2013-03-20T17:03:43.221Z',
-        'StatusMessage': None,
-        'StatusCode': 'Succeeded',
-    })
+    joboutput = json.loads(job.read().decode("utf-8"))
+
+    joboutput.should.have.key('Tier').which.should.equal('Standard')
+    joboutput.should.have.key('StatusCode').which.should.equal('InProgress')
+    joboutput.should.have.key('VaultARN').which.should.equal('arn:aws:glacier:RegionInfo:us-west-2:012345678901:vaults/my_vault')

@@ -96,5 +84,7 @@ def test_get_job_output():
     })
     job_id = job_response['JobId']

+    time.sleep(6)
+
     output = conn.get_job_output(vault_name, job_id)
     output.read().decode("utf-8").should.equal("some stuff")
diff --git a/tests/test_glue/__init__.py b/tests/test_glue/__init__.py
new file mode 100644
index 000000000..baffc4882
--- /dev/null
+++ b/tests/test_glue/__init__.py
@@ -0,0 +1 @@
+from __future__ import unicode_literals
diff --git a/tests/test_glue/fixtures/__init__.py b/tests/test_glue/fixtures/__init__.py
new file mode 100644
index 000000000..baffc4882
--- /dev/null
+++ b/tests/test_glue/fixtures/__init__.py
@@ -0,0 +1 @@
+from __future__ import 
unicode_literals diff --git a/tests/test_glue/fixtures/datacatalog.py b/tests/test_glue/fixtures/datacatalog.py new file mode 100644 index 000000000..edad2f0f4 --- /dev/null +++ b/tests/test_glue/fixtures/datacatalog.py @@ -0,0 +1,56 @@ +from __future__ import unicode_literals + +TABLE_INPUT = { + 'Owner': 'a_fake_owner', + 'Parameters': { + 'EXTERNAL': 'TRUE', + }, + 'Retention': 0, + 'StorageDescriptor': { + 'BucketColumns': [], + 'Compressed': False, + 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', + 'NumberOfBuckets': -1, + 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', + 'Parameters': {}, + 'SerdeInfo': { + 'Parameters': { + 'serialization.format': '1' + }, + 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' + }, + 'SkewedInfo': { + 'SkewedColumnNames': [], + 'SkewedColumnValueLocationMaps': {}, + 'SkewedColumnValues': [] + }, + 'SortColumns': [], + 'StoredAsSubDirectories': False + }, + 'TableType': 'EXTERNAL_TABLE', +} + + +PARTITION_INPUT = { + # 'DatabaseName': 'dbname', + 'StorageDescriptor': { + 'BucketColumns': [], + 'Columns': [], + 'Compressed': False, + 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', + 'Location': 's3://.../partition=value', + 'NumberOfBuckets': -1, + 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', + 'Parameters': {}, + 'SerdeInfo': { + 'Parameters': {'path': 's3://...', 'serialization.format': '1'}, + 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'}, + 'SkewedInfo': {'SkewedColumnNames': [], + 'SkewedColumnValueLocationMaps': {}, + 'SkewedColumnValues': []}, + 'SortColumns': [], + 'StoredAsSubDirectories': False, + }, + # 'TableName': 'source_table', + # 'Values': ['2018-06-26'], +} diff --git a/tests/test_glue/helpers.py b/tests/test_glue/helpers.py new file mode 100644 index 000000000..331b99867 --- /dev/null +++ b/tests/test_glue/helpers.py @@ -0,0 +1,119 @@ +from __future__ import unicode_literals + +import copy + +from .fixtures.datacatalog import TABLE_INPUT, PARTITION_INPUT + + +def create_database(client, database_name): + return client.create_database( + DatabaseInput={ + 'Name': database_name + } + ) + + +def get_database(client, database_name): + return client.get_database(Name=database_name) + + +def create_table_input(database_name, table_name, columns=[], partition_keys=[]): + table_input = copy.deepcopy(TABLE_INPUT) + table_input['Name'] = table_name + table_input['PartitionKeys'] = partition_keys + table_input['StorageDescriptor']['Columns'] = columns + table_input['StorageDescriptor']['Location'] = 's3://my-bucket/{database_name}/{table_name}'.format( + database_name=database_name, + table_name=table_name + ) + return table_input + + +def create_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = create_table_input(database_name, table_name, **kwargs) + + return client.create_table( + DatabaseName=database_name, + TableInput=table_input + ) + + +def update_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = create_table_input(database_name, table_name, **kwargs) + + return client.update_table( + DatabaseName=database_name, + TableInput=table_input, + ) + + +def get_table(client, database_name, table_name): + return client.get_table( + DatabaseName=database_name, + Name=table_name + ) + + +def get_tables(client, 
database_name):
+    return client.get_tables(
+        DatabaseName=database_name
+    )
+
+
+def get_table_versions(client, database_name, table_name):
+    return client.get_table_versions(
+        DatabaseName=database_name,
+        TableName=table_name
+    )
+
+
+def get_table_version(client, database_name, table_name, version_id):
+    return client.get_table_version(
+        DatabaseName=database_name,
+        TableName=table_name,
+        VersionId=version_id,
+    )
+
+
+def create_partition_input(database_name, table_name, values=[], columns=[]):
+    root_path = 's3://my-bucket/{database_name}/{table_name}'.format(
+        database_name=database_name,
+        table_name=table_name
+    )
+
+    part_input = copy.deepcopy(PARTITION_INPUT)
+    part_input['Values'] = values
+    part_input['StorageDescriptor']['Columns'] = columns
+    part_input['StorageDescriptor']['SerdeInfo']['Parameters']['path'] = root_path
+    return part_input
+
+
+def create_partition(client, database_name, table_name, partition_input=None, **kwargs):
+    if partition_input is None:
+        partition_input = create_partition_input(database_name, table_name, **kwargs)
+    return client.create_partition(
+        DatabaseName=database_name,
+        TableName=table_name,
+        PartitionInput=partition_input
+    )
+
+
+def update_partition(client, database_name, table_name, old_values=[], partition_input=None, **kwargs):
+    if partition_input is None:
+        partition_input = create_partition_input(database_name, table_name, **kwargs)
+    return client.update_partition(
+        DatabaseName=database_name,
+        TableName=table_name,
+        PartitionInput=partition_input,
+        PartitionValueList=old_values,
+    )
+
+
+def get_partition(client, database_name, table_name, values):
+    return client.get_partition(
+        DatabaseName=database_name,
+        TableName=table_name,
+        PartitionValues=values,
+    )
diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py
new file mode 100644
index 000000000..9034feb55
--- /dev/null
+++ b/tests/test_glue/test_datacatalog.py
@@ -0,0 +1,699 @@
+from __future__ import unicode_literals
+
+import sure  # noqa
+import re
+from nose.tools import assert_raises
+import boto3
+from botocore.client import ClientError
+
+
+from datetime import datetime
+import pytz
+
+from moto import mock_glue
+from . 
import helpers + + +@mock_glue +def test_create_database(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + response = helpers.get_database(client, database_name) + database = response['Database'] + + database.should.equal({'Name': database_name}) + + +@mock_glue +def test_create_database_already_exists(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'cantcreatethisdatabasetwice' + helpers.create_database(client, database_name) + + with assert_raises(ClientError) as exc: + helpers.create_database(client, database_name) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_database_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'nosuchdatabase' + + with assert_raises(ClientError) as exc: + helpers.get_database(client, database_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') + + +@mock_glue +def test_create_table(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myspecialtable' + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + + response = helpers.get_table(client, database_name, table_name) + table = response['Table'] + + table['Name'].should.equal(table_input['Name']) + table['StorageDescriptor'].should.equal(table_input['StorageDescriptor']) + table['PartitionKeys'].should.equal(table_input['PartitionKeys']) + + +@mock_glue +def test_create_table_already_exists(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'cantcreatethistabletwice' + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.create_table(client, database_name, table_name) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_tables(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_names = ['myfirsttable', 'mysecondtable', 'mythirdtable'] + table_inputs = {} + + for table_name in table_names: + table_input = helpers.create_table_input(database_name, table_name) + table_inputs[table_name] = table_input + helpers.create_table(client, database_name, table_name, table_input) + + response = helpers.get_tables(client, database_name) + + tables = response['TableList'] + + tables.should.have.length_of(3) + + for table in tables: + table_name = table['Name'] + table_name.should.equal(table_inputs[table_name]['Name']) + table['StorageDescriptor'].should.equal(table_inputs[table_name]['StorageDescriptor']) + table['PartitionKeys'].should.equal(table_inputs[table_name]['PartitionKeys']) + + +@mock_glue +def test_get_table_versions(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myfirsttable' + version_inputs = {} + + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, 
database_name, table_name, table_input)
+    version_inputs["1"] = table_input
+
+    columns = [{'Name': 'country', 'Type': 'string'}]
+    table_input = helpers.create_table_input(database_name, table_name, columns=columns)
+    helpers.update_table(client, database_name, table_name, table_input)
+    version_inputs["2"] = table_input
+
+    # Updating with an identical input should still create a new version
+    helpers.update_table(client, database_name, table_name, table_input)
+    version_inputs["3"] = table_input
+
+    response = helpers.get_table_versions(client, database_name, table_name)
+
+    vers = response['TableVersions']
+
+    vers.should.have.length_of(3)
+    vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([])
+    vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns)
+
+    for n, ver in enumerate(vers):
+        n = str(n + 1)
+        ver['VersionId'].should.equal(n)
+        ver['Table']['Name'].should.equal(table_name)
+        ver['Table']['StorageDescriptor'].should.equal(version_inputs[n]['StorageDescriptor'])
+        ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys'])
+
+    response = helpers.get_table_version(client, database_name, table_name, "3")
+    ver = response['TableVersion']
+
+    ver['VersionId'].should.equal("3")
+    ver['Table']['Name'].should.equal(table_name)
+    ver['Table']['StorageDescriptor']['Columns'].should.equal(columns)
+
+
+@mock_glue
+def test_get_table_version_not_found():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    table_name = 'myfirsttable'
+    helpers.create_database(client, database_name)
+    helpers.create_table(client, database_name, table_name)
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table_version(client, database_name, 'myfirsttable', "20")
+
+    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
+    exc.exception.response['Error']['Message'].should.match('version', re.I)
+
+
+@mock_glue
+def test_get_table_version_invalid_input():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    table_name = 'myfirsttable'
+    helpers.create_database(client, database_name)
+    helpers.create_table(client, database_name, table_name)
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int")
+
+    exc.exception.response['Error']['Code'].should.equal('InvalidInputException')
+
+
+@mock_glue
+def test_get_table_not_exits():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    helpers.create_database(client, database_name)
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table(client, database_name, 'myfirsttable')
+
+    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
+    exc.exception.response['Error']['Message'].should.match('Table myfirsttable not found')
+
+
+@mock_glue
+def test_get_table_when_database_not_exits():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'nosuchdatabase'
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table(client, database_name, 'myfirsttable')
+
+    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
+    exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found')
+
+
+@mock_glue
+def test_delete_table():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    helpers.create_database(client, database_name)
+
+    table_name = 
'myspecialtable' + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + + result = client.delete_table(DatabaseName=database_name, Name=table_name) + result['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # confirm table is deleted + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, table_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Table myspecialtable not found') + +@mock_glue +def test_batch_delete_table(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myspecialtable' + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + + result = client.batch_delete_table(DatabaseName=database_name, TablesToDelete=[table_name]) + result['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # confirm table is deleted + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, table_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Table myspecialtable not found') + + +@mock_glue +def test_get_partitions_empty(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + response['Partitions'].should.have.length_of(0) + + +@mock_glue +def test_create_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + before = datetime.now(pytz.utc) + + part_input = helpers.create_partition_input(database_name, table_name, values=values) + helpers.create_partition(client, database_name, table_name, part_input) + + after = datetime.now(pytz.utc) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + partitions = response['Partitions'] + + partitions.should.have.length_of(1) + + partition = partitions[0] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor']) + partition['Values'].should.equal(values) + partition['CreationTime'].should.be.greater_than(before) + partition['CreationTime'].should.be.lower_than(after) + + +@mock_glue +def test_create_partition_already_exist(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + helpers.create_partition(client, database_name, table_name, values=values) + + with assert_raises(ClientError) as exc: + helpers.create_partition(client, database_name, table_name, values=values) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_partition_not_found(): + client = 
boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + +@mock_glue +def test_batch_create_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + before = datetime.now(pytz.utc) + + partition_inputs = [] + for i in range(0, 20): + values = ["2018-10-{:2}".format(i)] + part_input = helpers.create_partition_input(database_name, table_name, values=values) + partition_inputs.append(part_input) + + client.batch_create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInputList=partition_inputs + ) + + after = datetime.now(pytz.utc) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + partitions = response['Partitions'] + + partitions.should.have.length_of(20) + + for idx, partition in enumerate(partitions): + partition_input = partition_inputs[idx] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor'].should.equal(partition_input['StorageDescriptor']) + partition['Values'].should.equal(partition_input['Values']) + partition['CreationTime'].should.be.greater_than(before) + partition['CreationTime'].should.be.lower_than(after) + + +@mock_glue +def test_batch_create_partition_already_exist(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + helpers.create_partition(client, database_name, table_name, values=values) + + partition_input = helpers.create_partition_input(database_name, table_name, values=values) + + response = client.batch_create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInputList=[partition_input] + ) + + response.should.have.key('Errors') + response['Errors'].should.have.length_of(1) + response['Errors'][0]['PartitionValues'].should.equal(values) + response['Errors'][0]['ErrorDetail']['ErrorCode'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values[1]) + + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['Values'].should.equal(values[1]) + + +@mock_glue +def test_batch_get_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + 
table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + partitions_to_get = [ + {'Values': values[0]}, + {'Values': values[1]}, + ] + response = client.batch_get_partition(DatabaseName=database_name, TableName=table_name, PartitionsToGet=partitions_to_get) + + partitions = response['Partitions'] + partitions.should.have.length_of(2) + + partition = partitions[1] + partition['TableName'].should.equal(table_name) + partition['Values'].should.equal(values[1]) + + +@mock_glue +def test_batch_get_partition_missing_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01'], ['2018-08-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[2]) + + partitions_to_get = [ + {'Values': values[0]}, + {'Values': values[1]}, + {'Values': values[2]}, + ] + response = client.batch_get_partition(DatabaseName=database_name, TableName=table_name, PartitionsToGet=partitions_to_get) + + partitions = response['Partitions'] + partitions.should.have.length_of(2) + + partitions[0]['Values'].should.equal(values[0]) + partitions[1]['Values'].should.equal(values[2]) + + + +@mock_glue +def test_update_partition_not_found_moving(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=['0000-00-00'], values=['2018-10-02']) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_update_partition_not_found_change_in_place(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values, values=values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_update_partition_cannot_overwrite(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, 
old_values=values[0], values=values[1]) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_update_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) + + +@mock_glue +def test_update_partition_move(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + new_values = ['2018-09-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=new_values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + # Old partition shouldn't exist anymore + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=new_values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) + +@mock_glue +def test_delete_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + part_input = helpers.create_partition_input(database_name, table_name, values=values) + helpers.create_partition(client, database_name, table_name, part_input) + + client.delete_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionValues=values, + ) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + partitions = response['Partitions'] + partitions.should.be.empty + +@mock_glue +def test_delete_partition_bad_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + client.delete_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionValues=values, + ) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + +@mock_glue +def test_batch_delete_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + 
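A small formatting footnote on the batch fixtures used above and below: `"2018-10-{:2}".format(i)` pads to width 2 with *spaces*, so the generated values run from `"2018-10- 0"` to `"2018-10-19"`. The tests only require the values to round-trip, so this is harmless; a zero-padded variant (shown here as a hypothetical alternative, not what these tests assert) would look like:

```python
# Hypothetical zero-padded variant of the fixture values; the tests in this
# file reuse whatever values they generated, so either format round-trips.
values = ['2018-10-{:02d}'.format(i) for i in range(20)]
assert values[0] == '2018-10-00'
assert values[-1] == '2018-10-19'
```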
table_name = 'myfirsttable' + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + partition_inputs = [] + for i in range(0, 20): + values = ["2018-10-{:2}".format(i)] + part_input = helpers.create_partition_input(database_name, table_name, values=values) + partition_inputs.append(part_input) + + client.batch_create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInputList=partition_inputs + ) + + partition_values = [{"Values": p["Values"]} for p in partition_inputs] + + response = client.batch_delete_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionsToDelete=partition_values, + ) + + response.should_not.have.key('Errors') + +@mock_glue +def test_batch_delete_partition_with_bad_partitions(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + partition_inputs = [] + for i in range(0, 20): + values = ["2018-10-{:2}".format(i)] + part_input = helpers.create_partition_input(database_name, table_name, values=values) + partition_inputs.append(part_input) + + client.batch_create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInputList=partition_inputs + ) + + partition_values = [{"Values": p["Values"]} for p in partition_inputs] + + partition_values.insert(5, {"Values": ["2018-11-01"]}) + partition_values.insert(10, {"Values": ["2018-11-02"]}) + partition_values.insert(15, {"Values": ["2018-11-03"]}) + + response = client.batch_delete_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionsToDelete=partition_values, + ) + + response.should.have.key('Errors') + response['Errors'].should.have.length_of(3) + error_partitions = map(lambda x: x['PartitionValues'], response['Errors']) + ['2018-11-01'].should.be.within(error_partitions) + ['2018-11-02'].should.be.within(error_partitions) + ['2018-11-03'].should.be.within(error_partitions) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index a80768101..e7507e2e5 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -1,9 +1,12 @@ from __future__ import unicode_literals import base64 +import json import boto import boto3 +import os import sure # noqa +import sys from boto.exception import BotoServerError from botocore.exceptions import ClientError from moto import mock_iam, mock_iam_deprecated @@ -11,9 +14,61 @@ from moto.iam.models import aws_managed_policies from nose.tools import assert_raises, assert_equals from nose.tools import raises +from datetime import datetime from tests.helpers import requires_boto_gte +MOCK_CERT = """-----BEGIN CERTIFICATE----- +MIIBpzCCARACCQCY5yOdxCTrGjANBgkqhkiG9w0BAQsFADAXMRUwEwYDVQQKDAxt +b3RvIHRlc3RpbmcwIBcNMTgxMTA1MTkwNTIwWhgPMjI5MjA4MTkxOTA1MjBaMBcx +FTATBgNVBAoMDG1vdG8gdGVzdGluZzCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC +gYEA1Jn3g2h7LD3FLqdpcYNbFXCS4V4eDpuTCje9vKFcC3pi/01147X3zdfPy8Mt +ZhKxcREOwm4NXykh23P9KW7fBovpNwnbYsbPqj8Hf1ZaClrgku1arTVhEnKjx8zO +vaR/bVLCss4uE0E0VM1tJn/QGQsfthFsjuHtwx8uIWz35tUCAwEAATANBgkqhkiG +9w0BAQsFAAOBgQBWdOQ7bDc2nWkUhFjZoNIZrqjyNdjlMUndpwREVD7FQ/DuxJMj +FyDHrtlrS80dPUQWNYHw++oACDpWO01LGLPPrGmuO/7cOdojPEd852q5gd+7W9xt +8vUH+pBa6IBLbvBp+szli51V3TLSWcoyy4ceJNQU2vCkTLoFdS0RLd/7tQ== +-----END CERTIFICATE-----""" + +MOCK_POLICY = """ +{ + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": 
"s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } +} +""" + +MOCK_POLICY_2 = """ +{ + "Version": "2012-10-17", + "Id": "2", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } +} +""" + +MOCK_POLICY_3 = """ +{ + "Version": "2012-10-17", + "Id": "3", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } +} +""" + + @mock_iam_deprecated() def test_get_all_server_certs(): conn = boto.connect_iam() @@ -58,6 +113,19 @@ def test_upload_server_cert(): "arn:aws:iam::123456789012:server-certificate/certname") +@mock_iam_deprecated() +def test_delete_server_cert(): + conn = boto.connect_iam() + + conn.upload_server_cert("certname", "certbody", "privatekey") + conn.get_server_certificate("certname") + conn.delete_server_cert("certname") + with assert_raises(BotoServerError): + conn.get_server_certificate("certname") + with assert_raises(BotoServerError): + conn.delete_server_cert("certname") + + @mock_iam_deprecated() @raises(BotoServerError) def test_get_role__should_throw__when_role_does_not_exist(): @@ -95,6 +163,9 @@ def test_create_role_and_instance_profile(): conn.list_roles().roles[0].role_name.should.equal('my-role') + # Test with an empty path: + profile = conn.create_instance_profile('my-other-profile') + profile.path.should.equal('/') @mock_iam_deprecated() def test_remove_role_from_instance_profile(): @@ -211,12 +282,12 @@ def test_list_instance_profiles_for_role(): def test_list_role_policies(): conn = boto.connect_iam() conn.create_role("my-role") - conn.put_role_policy("my-role", "test policy", "my policy") + conn.put_role_policy("my-role", "test policy", MOCK_POLICY) role = conn.list_role_policies("my-role") role.policy_names.should.have.length_of(1) role.policy_names[0].should.equal("test policy") - conn.put_role_policy("my-role", "test policy 2", "another policy") + conn.put_role_policy("my-role", "test policy 2", MOCK_POLICY) role = conn.list_role_policies("my-role") role.policy_names.should.have.length_of(2) @@ -234,12 +305,21 @@ def test_put_role_policy(): conn = boto.connect_iam() conn.create_role( "my-role", assume_role_policy_document="some policy", path="my-path") - conn.put_role_policy("my-role", "test policy", "my policy") + conn.put_role_policy("my-role", "test policy", MOCK_POLICY) policy = conn.get_role_policy( "my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] policy.should.equal("test policy") +@mock_iam +def test_get_role_policy(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_role( + RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="my-path") + with assert_raises(conn.exceptions.NoSuchEntityException): + conn.get_role_policy(RoleName="my-role", PolicyName="does-not-exist") + + @mock_iam_deprecated() def test_update_assume_role_policy(): conn = boto.connect_iam() @@ -249,20 +329,102 @@ def test_update_assume_role_policy(): role.assume_role_policy_document.should.equal("my-policy") +@mock_iam +def test_create_policy(): + conn = boto3.client('iam', region_name='us-east-1') + response = conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument=MOCK_POLICY) + response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestCreatePolicy") + + @mock_iam def test_create_policy_versions(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): conn.create_policy_version( - 
PolicyArn="arn:aws:iam::aws:policy/TestCreatePolicyVersion", + PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", PolicyDocument='{"some":"policy"}') conn.create_policy( PolicyName="TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') + PolicyDocument=MOCK_POLICY) version = conn.create_policy_version( - PolicyArn="arn:aws:iam::aws:policy/TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') - version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) + PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", + PolicyDocument=MOCK_POLICY, + SetAsDefault=True) + version.get('PolicyVersion').get('Document').should.equal(json.loads(MOCK_POLICY)) + version.get('PolicyVersion').get('VersionId').should.equal("v2") + version.get('PolicyVersion').get('IsDefaultVersion').should.be.ok + conn.delete_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", + VersionId="v1") + version = conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", + PolicyDocument=MOCK_POLICY) + version.get('PolicyVersion').get('VersionId').should.equal("v3") + version.get('PolicyVersion').get('IsDefaultVersion').shouldnt.be.ok + + +@mock_iam +def test_create_many_policy_versions(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_policy( + PolicyName="TestCreateManyPolicyVersions", + PolicyDocument=MOCK_POLICY) + for _ in range(0, 4): + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestCreateManyPolicyVersions", + PolicyDocument=MOCK_POLICY) + with assert_raises(ClientError): + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestCreateManyPolicyVersions", + PolicyDocument=MOCK_POLICY) + + +@mock_iam +def test_set_default_policy_version(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_policy( + PolicyName="TestSetDefaultPolicyVersion", + PolicyDocument=MOCK_POLICY) + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestSetDefaultPolicyVersion", + PolicyDocument=MOCK_POLICY_2, + SetAsDefault=True) + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestSetDefaultPolicyVersion", + PolicyDocument=MOCK_POLICY_3, + SetAsDefault=True) + versions = conn.list_policy_versions( + PolicyArn="arn:aws:iam::123456789012:policy/TestSetDefaultPolicyVersion") + versions.get('Versions')[0].get('Document').should.equal(json.loads(MOCK_POLICY)) + versions.get('Versions')[0].get('IsDefaultVersion').shouldnt.be.ok + versions.get('Versions')[1].get('Document').should.equal(json.loads(MOCK_POLICY_2)) + versions.get('Versions')[1].get('IsDefaultVersion').shouldnt.be.ok + versions.get('Versions')[2].get('Document').should.equal(json.loads(MOCK_POLICY_3)) + versions.get('Versions')[2].get('IsDefaultVersion').should.be.ok + + +@mock_iam +def test_get_policy(): + conn = boto3.client('iam', region_name='us-east-1') + response = conn.create_policy( + PolicyName="TestGetPolicy", + PolicyDocument=MOCK_POLICY) + policy = conn.get_policy( + PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicy") + policy['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestGetPolicy") + + +@mock_iam +def test_get_aws_managed_policy(): + conn = boto3.client('iam', region_name='us-east-1') + managed_policy_arn = 'arn:aws:iam::aws:policy/IAMUserChangePassword' + managed_policy_create_date = datetime.strptime("2016-11-15T00:25:16+00:00", "%Y-%m-%dT%H:%M:%S+00:00") + policy = 
conn.get_policy( + PolicyArn=managed_policy_arn) + policy['Policy']['Arn'].should.equal(managed_policy_arn) + policy['Policy']['CreateDate'].replace(tzinfo=None).should.equal(managed_policy_create_date) @mock_iam @@ -270,18 +432,51 @@ def test_get_policy_version(): conn = boto3.client('iam', region_name='us-east-1') conn.create_policy( PolicyName="TestGetPolicyVersion", - PolicyDocument='{"some":"policy"}') + PolicyDocument=MOCK_POLICY) version = conn.create_policy_version( - PolicyArn="arn:aws:iam::aws:policy/TestGetPolicyVersion", - PolicyDocument='{"some":"policy"}') + PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", + PolicyDocument=MOCK_POLICY) with assert_raises(ClientError): conn.get_policy_version( - PolicyArn="arn:aws:iam::aws:policy/TestGetPolicyVersion", + PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", VersionId='v2-does-not-exist') retrieved = conn.get_policy_version( - PolicyArn="arn:aws:iam::aws:policy/TestGetPolicyVersion", + PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", VersionId=version.get('PolicyVersion').get('VersionId')) - retrieved.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) + retrieved.get('PolicyVersion').get('Document').should.equal(json.loads(MOCK_POLICY)) + retrieved.get('PolicyVersion').get('IsDefaultVersion').shouldnt.be.ok + + +@mock_iam +def test_get_aws_managed_policy_version(): + conn = boto3.client('iam', region_name='us-east-1') + managed_policy_arn = 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole' + managed_policy_version_create_date = datetime.strptime("2015-04-09T15:03:43+00:00", "%Y-%m-%dT%H:%M:%S+00:00") + with assert_raises(ClientError): + conn.get_policy_version( + PolicyArn=managed_policy_arn, + VersionId='v2-does-not-exist') + retrieved = conn.get_policy_version( + PolicyArn=managed_policy_arn, + VersionId="v1") + retrieved['PolicyVersion']['CreateDate'].replace(tzinfo=None).should.equal(managed_policy_version_create_date) + retrieved['PolicyVersion']['Document'].should.be.an(dict) + + +@mock_iam +def test_get_aws_managed_policy_v4_version(): + conn = boto3.client('iam', region_name='us-east-1') + managed_policy_arn = 'arn:aws:iam::aws:policy/job-function/SystemAdministrator' + managed_policy_version_create_date = datetime.strptime("2018-10-08T21:33:45+00:00", "%Y-%m-%dT%H:%M:%S+00:00") + with assert_raises(ClientError): + conn.get_policy_version( + PolicyArn=managed_policy_arn, + VersionId='v2-does-not-exist') + retrieved = conn.get_policy_version( + PolicyArn=managed_policy_arn, + VersionId="v4") + retrieved['PolicyVersion']['CreateDate'].replace(tzinfo=None).should.equal(managed_policy_version_create_date) + retrieved['PolicyVersion']['Document'].should.be.an(dict) @mock_iam @@ -289,20 +484,27 @@ def test_list_policy_versions(): conn = boto3.client('iam', region_name='us-east-1') with assert_raises(ClientError): versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::aws:policy/TestListPolicyVersions") + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") conn.create_policy( PolicyName="TestListPolicyVersions", - PolicyDocument='{"some":"policy"}') - conn.create_policy_version( - PolicyArn="arn:aws:iam::aws:policy/TestListPolicyVersions", - PolicyDocument='{"first":"policy"}') - conn.create_policy_version( - PolicyArn="arn:aws:iam::aws:policy/TestListPolicyVersions", - PolicyDocument='{"second":"policy"}') + PolicyDocument=MOCK_POLICY) versions = conn.list_policy_versions( - 
PolicyArn="arn:aws:iam::aws:policy/TestListPolicyVersions") - versions.get('Versions')[0].get('Document').should.equal({'first': 'policy'}) - versions.get('Versions')[1].get('Document').should.equal({'second': 'policy'}) + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") + versions.get('Versions')[0].get('VersionId').should.equal('v1') + versions.get('Versions')[0].get('IsDefaultVersion').should.be.ok + + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", + PolicyDocument=MOCK_POLICY_2) + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", + PolicyDocument=MOCK_POLICY_3) + versions = conn.list_policy_versions( + PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") + versions.get('Versions')[1].get('Document').should.equal(json.loads(MOCK_POLICY_2)) + versions.get('Versions')[1].get('IsDefaultVersion').shouldnt.be.ok + versions.get('Versions')[2].get('Document').should.equal(json.loads(MOCK_POLICY_3)) + versions.get('Versions')[2].get('IsDefaultVersion').shouldnt.be.ok @mock_iam @@ -310,20 +512,35 @@ def test_delete_policy_version(): conn = boto3.client('iam', region_name='us-east-1') conn.create_policy( PolicyName="TestDeletePolicyVersion", - PolicyDocument='{"some":"policy"}') + PolicyDocument=MOCK_POLICY) conn.create_policy_version( - PolicyArn="arn:aws:iam::aws:policy/TestDeletePolicyVersion", - PolicyDocument='{"first":"policy"}') + PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", + PolicyDocument=MOCK_POLICY) with assert_raises(ClientError): conn.delete_policy_version( - PolicyArn="arn:aws:iam::aws:policy/TestDeletePolicyVersion", + PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", VersionId='v2-nope-this-does-not-exist') conn.delete_policy_version( - PolicyArn="arn:aws:iam::aws:policy/TestDeletePolicyVersion", - VersionId='v1') + PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", + VersionId='v2') versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::aws:policy/TestDeletePolicyVersion") - len(versions.get('Versions')).should.equal(0) + PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion") + len(versions.get('Versions')).should.equal(1) + + +@mock_iam +def test_delete_default_policy_version(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_policy( + PolicyName="TestDeletePolicyVersion", + PolicyDocument=MOCK_POLICY) + conn.create_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", + PolicyDocument=MOCK_POLICY_2) + with assert_raises(ClientError): + conn.delete_policy_version( + PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", + VersionId='v1') @mock_iam_deprecated() @@ -343,6 +560,19 @@ def test_get_user(): conn.get_user('my-user') +@mock_iam() +def test_update_user(): + conn = boto3.client('iam', region_name='us-east-1') + with assert_raises(conn.exceptions.NoSuchEntityException): + conn.update_user(UserName='my-user') + conn.create_user(UserName='my-user') + conn.update_user(UserName='my-user', NewPath='/new-path/', NewUserName='new-user') + response = conn.get_user(UserName='new-user') + response['User'].get('Path').should.equal('/new-path/') + with assert_raises(conn.exceptions.NoSuchEntityException): + conn.get_user(UserName='my-user') + + @mock_iam_deprecated() def test_get_current_user(): """If no user is specific, IAM returns the current user""" @@ -367,22 +597,20 @@ def 
test_list_users(): @mock_iam() def test_user_policies(): policy_name = 'UserManagedPolicy' - policy_document = "{'mypolicy': 'test'}" user_name = 'my-user' conn = boto3.client('iam', region_name='us-east-1') conn.create_user(UserName=user_name) conn.put_user_policy( UserName=user_name, PolicyName=policy_name, - PolicyDocument=policy_document + PolicyDocument=MOCK_POLICY ) policy_doc = conn.get_user_policy( UserName=user_name, PolicyName=policy_name ) - test = policy_document in policy_doc['PolicyDocument'] - test.should.equal(True) + policy_doc['PolicyDocument'].should.equal(json.loads(MOCK_POLICY)) policies = conn.list_user_policies(UserName=user_name) len(policies['PolicyNames']).should.equal(1) @@ -418,13 +646,17 @@ def test_delete_login_profile(): conn.delete_login_profile('my-user') -@mock_iam_deprecated() +@mock_iam() def test_create_access_key(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.create_access_key('my-user') - conn.create_user('my-user') - conn.create_access_key('my-user') + conn = boto3.client('iam', region_name='us-east-1') + with assert_raises(ClientError): + conn.create_access_key(UserName='my-user') + conn.create_user(UserName='my-user') + access_key = conn.create_access_key(UserName='my-user')["AccessKey"] + (datetime.utcnow() - access_key["CreateDate"].replace(tzinfo=None)).seconds.should.be.within(0, 10) + access_key["AccessKeyId"].should.have.length_of(20) + access_key["SecretAccessKey"].should.have.length_of(40) + assert access_key["AccessKeyId"].startswith("AKIA") @mock_iam_deprecated() @@ -499,6 +731,14 @@ def test_generate_credential_report(): result['generate_credential_report_response'][ 'generate_credential_report_result']['state'].should.equal('COMPLETE') +@mock_iam +def test_boto3_generate_credential_report(): + conn = boto3.client('iam', region_name='us-east-1') + result = conn.generate_credential_report() + result['State'].should.equal('STARTED') + result = conn.generate_credential_report() + result['State'].should.equal('COMPLETE') + @mock_iam_deprecated() def test_get_credential_report(): @@ -515,13 +755,27 @@ def test_get_credential_report(): report.should.match(r'.*my-user.*') +@mock_iam +def test_boto3_get_credential_report(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + with assert_raises(ClientError): + conn.get_credential_report() + result = conn.generate_credential_report() + while result['State'] != 'COMPLETE': + result = conn.generate_credential_report() + result = conn.get_credential_report() + report = result['Content'].decode('utf-8') + report.should.match(r'.*my-user.*') + + @requires_boto_gte('2.39') @mock_iam_deprecated() def test_managed_policy(): conn = boto.connect_iam() conn.create_policy(policy_name='UserManagedPolicy', - policy_document={'mypolicy': 'test'}, + policy_document=MOCK_POLICY, path='/mypolicy/', description='my user managed policy') @@ -622,7 +876,7 @@ def test_attach_detach_user_policy(): policy_name = 'UserAttachedPolicy' policy = iam.create_policy(PolicyName=policy_name, - PolicyDocument='{"mypolicy": "test"}', + PolicyDocument=MOCK_POLICY, Path='/mypolicy/', Description='my user attached policy') @@ -638,3 +892,572 @@ def test_attach_detach_user_policy(): resp = client.list_attached_user_policies(UserName=user.name) resp['AttachedPolicies'].should.have.length_of(0) + + +@mock_iam +def test_update_access_key(): + iam = boto3.resource('iam', region_name='us-east-1') + client = iam.meta.client + username = 'test-user' + 
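One pattern in the access-key test above deserves a caveat: `(datetime.utcnow() - create_date).seconds` reads only the seconds *component* of the timedelta, which wraps at day boundaries, so comparing the whole timedelta is the more robust recency check. A minimal sketch of the safer comparison, mirroring the create_access_key flow from that test:

```python
from datetime import datetime, timedelta

import boto3
from moto import mock_iam


@mock_iam
def example_key_age_check():
    client = boto3.client('iam', region_name='us-east-1')
    client.create_user(UserName='my-user')
    key = client.create_access_key(UserName='my-user')['AccessKey']
    # timedelta.seconds is only the 0..86399 component; a key "created" a day
    # ago would still report seconds == 0, so compare the whole delta instead.
    age = datetime.utcnow() - key['CreateDate'].replace(tzinfo=None)
    assert timedelta(0) <= age < timedelta(seconds=10)
```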
iam.create_user(UserName=username) + with assert_raises(ClientError): + client.update_access_key(UserName=username, + AccessKeyId='non-existent-key', + Status='Inactive') + key = client.create_access_key(UserName=username)['AccessKey'] + client.update_access_key(UserName=username, + AccessKeyId=key['AccessKeyId'], + Status='Inactive') + resp = client.list_access_keys(UserName=username) + resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive') + + +@mock_iam +def test_get_access_key_last_used(): + iam = boto3.resource('iam', region_name='us-east-1') + client = iam.meta.client + username = 'test-user' + iam.create_user(UserName=username) + with assert_raises(ClientError): + client.get_access_key_last_used(AccessKeyId='non-existent-key-id') + create_key_response = client.create_access_key(UserName=username)['AccessKey'] + resp = client.get_access_key_last_used(AccessKeyId=create_key_response['AccessKeyId']) + + datetime.strftime(resp["AccessKeyLastUsed"]["LastUsedDate"], "%Y-%m-%d").should.equal(datetime.strftime( + datetime.utcnow(), + "%Y-%m-%d" + )) + resp["UserName"].should.equal(create_key_response["UserName"]) + + +@mock_iam +def test_get_account_authorization_details(): + test_policy = json.dumps({ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "s3:ListBucket", + "Resource": "*", + "Effect": "Allow", + } + ] + }) + + conn = boto3.client('iam', region_name='us-east-1') + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") + conn.create_user(Path='/', UserName='testUser') + conn.create_group(Path='/', GroupName='testGroup') + conn.create_policy( + PolicyName='testPolicy', + Path='/', + PolicyDocument=test_policy, + Description='Test Policy' + ) + + # Attach things to the user and group: + conn.put_user_policy(UserName='testUser', PolicyName='testPolicy', PolicyDocument=test_policy) + conn.put_group_policy(GroupName='testGroup', PolicyName='testPolicy', PolicyDocument=test_policy) + + conn.attach_user_policy(UserName='testUser', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') + conn.attach_group_policy(GroupName='testGroup', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') + + conn.add_user_to_group(UserName='testUser', GroupName='testGroup') + + # Add things to the role: + conn.create_instance_profile(InstanceProfileName='ipn') + conn.add_role_to_instance_profile(InstanceProfileName='ipn', RoleName='my-role') + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': 'somevalue' + }, + { + 'Key': 'someotherkey', + 'Value': 'someothervalue' + } + ]) + conn.put_role_policy(RoleName='my-role', PolicyName='test-policy', PolicyDocument=test_policy) + conn.attach_role_policy(RoleName='my-role', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') + + result = conn.get_account_authorization_details(Filter=['Role']) + assert len(result['RoleDetailList']) == 1 + assert len(result['UserDetailList']) == 0 + assert len(result['GroupDetailList']) == 0 + assert len(result['Policies']) == 0 + assert len(result['RoleDetailList'][0]['InstanceProfileList']) == 1 + assert len(result['RoleDetailList'][0]['Tags']) == 2 + assert len(result['RoleDetailList'][0]['RolePolicyList']) == 1 + assert len(result['RoleDetailList'][0]['AttachedManagedPolicies']) == 1 + assert result['RoleDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy' + assert result['RoleDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \ + 'arn:aws:iam::123456789012:policy/testPolicy' + + result = 
conn.get_account_authorization_details(Filter=['User']) + assert len(result['RoleDetailList']) == 0 + assert len(result['UserDetailList']) == 1 + assert len(result['UserDetailList'][0]['GroupList']) == 1 + assert len(result['UserDetailList'][0]['AttachedManagedPolicies']) == 1 + assert len(result['GroupDetailList']) == 0 + assert len(result['Policies']) == 0 + assert result['UserDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy' + assert result['UserDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \ + 'arn:aws:iam::123456789012:policy/testPolicy' + + result = conn.get_account_authorization_details(Filter=['Group']) + assert len(result['RoleDetailList']) == 0 + assert len(result['UserDetailList']) == 0 + assert len(result['GroupDetailList']) == 1 + assert len(result['GroupDetailList'][0]['GroupPolicyList']) == 1 + assert len(result['GroupDetailList'][0]['AttachedManagedPolicies']) == 1 + assert len(result['Policies']) == 0 + assert result['GroupDetailList'][0]['AttachedManagedPolicies'][0]['PolicyName'] == 'testPolicy' + assert result['GroupDetailList'][0]['AttachedManagedPolicies'][0]['PolicyArn'] == \ + 'arn:aws:iam::123456789012:policy/testPolicy' + + result = conn.get_account_authorization_details(Filter=['LocalManagedPolicy']) + assert len(result['RoleDetailList']) == 0 + assert len(result['UserDetailList']) == 0 + assert len(result['GroupDetailList']) == 0 + assert len(result['Policies']) == 1 + assert len(result['Policies'][0]['PolicyVersionList']) == 1 + + # Check for greater than 1 since this should always be greater than one but might change. + # See iam/aws_managed_policies.py + result = conn.get_account_authorization_details(Filter=['AWSManagedPolicy']) + assert len(result['RoleDetailList']) == 0 + assert len(result['UserDetailList']) == 0 + assert len(result['GroupDetailList']) == 0 + assert len(result['Policies']) > 1 + + result = conn.get_account_authorization_details() + assert len(result['RoleDetailList']) == 1 + assert len(result['UserDetailList']) == 1 + assert len(result['GroupDetailList']) == 1 + assert len(result['Policies']) > 1 + + +@mock_iam +def test_signing_certs(): + client = boto3.client('iam', region_name='us-east-1') + + # Create the IAM user first: + client.create_user(UserName='testing') + + # Upload the cert: + resp = client.upload_signing_certificate(UserName='testing', CertificateBody=MOCK_CERT)['Certificate'] + cert_id = resp['CertificateId'] + + assert resp['UserName'] == 'testing' + assert resp['Status'] == 'Active' + assert resp['CertificateBody'] == MOCK_CERT + assert resp['CertificateId'] + + # Upload a the cert with an invalid body: + with assert_raises(ClientError) as ce: + client.upload_signing_certificate(UserName='testing', CertificateBody='notacert') + assert ce.exception.response['Error']['Code'] == 'MalformedCertificate' + + # Upload with an invalid user: + with assert_raises(ClientError): + client.upload_signing_certificate(UserName='notauser', CertificateBody=MOCK_CERT) + + # Update: + client.update_signing_certificate(UserName='testing', CertificateId=cert_id, Status='Inactive') + + with assert_raises(ClientError): + client.update_signing_certificate(UserName='notauser', CertificateId=cert_id, Status='Inactive') + + with assert_raises(ClientError) as ce: + client.update_signing_certificate(UserName='testing', CertificateId='x' * 32, Status='Inactive') + + assert ce.exception.response['Error']['Message'] == 'The Certificate with id {id} cannot be found.'.format( + id='x' * 32) + + # List the certs: + 
resp = client.list_signing_certificates(UserName='testing')['Certificates'] + assert len(resp) == 1 + assert resp[0]['CertificateBody'] == MOCK_CERT + assert resp[0]['Status'] == 'Inactive' # Changed with the update call above. + + with assert_raises(ClientError): + client.list_signing_certificates(UserName='notauser') + + # Delete: + client.delete_signing_certificate(UserName='testing', CertificateId=cert_id) + + with assert_raises(ClientError): + client.delete_signing_certificate(UserName='notauser', CertificateId=cert_id) + + +@mock_iam() +def test_create_saml_provider(): + conn = boto3.client('iam', region_name='us-east-1') + response = conn.create_saml_provider( + Name="TestSAMLProvider", + SAMLMetadataDocument='a' * 1024 + ) + response['SAMLProviderArn'].should.equal("arn:aws:iam::123456789012:saml-provider/TestSAMLProvider") + + +@mock_iam() +def test_get_saml_provider(): + conn = boto3.client('iam', region_name='us-east-1') + saml_provider_create = conn.create_saml_provider( + Name="TestSAMLProvider", + SAMLMetadataDocument='a' * 1024 + ) + response = conn.get_saml_provider( + SAMLProviderArn=saml_provider_create['SAMLProviderArn'] + ) + response['SAMLMetadataDocument'].should.equal('a' * 1024) + + +@mock_iam() +def test_list_saml_providers(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_saml_provider( + Name="TestSAMLProvider", + SAMLMetadataDocument='a' * 1024 + ) + response = conn.list_saml_providers() + response['SAMLProviderList'][0]['Arn'].should.equal("arn:aws:iam::123456789012:saml-provider/TestSAMLProvider") + + +@mock_iam() +def test_delete_saml_provider(): + conn = boto3.client('iam', region_name='us-east-1') + saml_provider_create = conn.create_saml_provider( + Name="TestSAMLProvider", + SAMLMetadataDocument='a' * 1024 + ) + response = conn.list_saml_providers() + len(response['SAMLProviderList']).should.equal(1) + conn.delete_saml_provider( + SAMLProviderArn=saml_provider_create['SAMLProviderArn'] + ) + response = conn.list_saml_providers() + len(response['SAMLProviderList']).should.equal(0) + conn.create_user(UserName='testing') + + cert_id = '123456789012345678901234' + with assert_raises(ClientError) as ce: + conn.delete_signing_certificate(UserName='testing', CertificateId=cert_id) + + assert ce.exception.response['Error']['Message'] == 'The Certificate with id {id} cannot be found.'.format( + id=cert_id) + + # Verify that it's not in the list: + resp = conn.list_signing_certificates(UserName='testing') + assert not resp['Certificates'] + + +@mock_iam() +def test_tag_role(): + """Tests both the tag_role and get_role_tags capability""" + conn = boto3.client('iam', region_name='us-east-1') + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}") + + # Get without tags: + role = conn.get_role(RoleName='my-role')['Role'] + assert not role.get('Tags') + + # With proper tag values: + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': 'somevalue' + }, + { + 'Key': 'someotherkey', + 'Value': 'someothervalue' + } + ]) + + # Get role: + role = conn.get_role(RoleName='my-role')['Role'] + assert len(role['Tags']) == 2 + assert role['Tags'][0]['Key'] == 'somekey' + assert role['Tags'][0]['Value'] == 'somevalue' + assert role['Tags'][1]['Key'] == 'someotherkey' + assert role['Tags'][1]['Value'] == 'someothervalue' + + # Same -- but for list_role_tags: + tags = conn.list_role_tags(RoleName='my-role') + assert len(tags['Tags']) == 2 + assert role['Tags'][0]['Key'] == 'somekey' + assert role['Tags'][0]['Value'] == 
'somevalue' + assert role['Tags'][1]['Key'] == 'someotherkey' + assert role['Tags'][1]['Value'] == 'someothervalue' + assert not tags['IsTruncated'] + assert not tags.get('Marker') + + # Test pagination: + tags = conn.list_role_tags(RoleName='my-role', MaxItems=1) + assert len(tags['Tags']) == 1 + assert tags['IsTruncated'] + assert tags['Tags'][0]['Key'] == 'somekey' + assert tags['Tags'][0]['Value'] == 'somevalue' + assert tags['Marker'] == '1' + + tags = conn.list_role_tags(RoleName='my-role', Marker=tags['Marker']) + assert len(tags['Tags']) == 1 + assert tags['Tags'][0]['Key'] == 'someotherkey' + assert tags['Tags'][0]['Value'] == 'someothervalue' + assert not tags['IsTruncated'] + assert not tags.get('Marker') + + # Test updating an existing tag: + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': 'somenewvalue' + } + ]) + tags = conn.list_role_tags(RoleName='my-role') + assert len(tags['Tags']) == 2 + assert tags['Tags'][0]['Key'] == 'somekey' + assert tags['Tags'][0]['Value'] == 'somenewvalue' + + # Empty is good: + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': '' + } + ]) + tags = conn.list_role_tags(RoleName='my-role') + assert len(tags['Tags']) == 2 + assert tags['Tags'][0]['Key'] == 'somekey' + assert tags['Tags'][0]['Value'] == '' + + # Test creating tags with invalid values: + # With more than 50 tags: + with assert_raises(ClientError) as ce: + too_many_tags = list(map(lambda x: {'Key': str(x), 'Value': str(x)}, range(0, 51))) + conn.tag_role(RoleName='my-role', Tags=too_many_tags) + assert 'failed to satisfy constraint: Member must have length less than or equal to 50.' \ + in ce.exception.response['Error']['Message'] + + # With a duplicate tag: + with assert_raises(ClientError) as ce: + conn.tag_role(RoleName='my-role', Tags=[{'Key': '0', 'Value': ''}, {'Key': '0', 'Value': ''}]) + assert 'Duplicate tag keys found. Please note that Tag keys are case insensitive.' \ + in ce.exception.response['Error']['Message'] + + # Duplicate tag with different casing: + with assert_raises(ClientError) as ce: + conn.tag_role(RoleName='my-role', Tags=[{'Key': 'a', 'Value': ''}, {'Key': 'A', 'Value': ''}]) + assert 'Duplicate tag keys found. Please note that Tag keys are case insensitive.' \ + in ce.exception.response['Error']['Message'] + + # With a really big key: + with assert_raises(ClientError) as ce: + conn.tag_role(RoleName='my-role', Tags=[{'Key': '0' * 129, 'Value': ''}]) + assert 'Member must have length less than or equal to 128.' in ce.exception.response['Error']['Message'] + + # With a really big value: + with assert_raises(ClientError) as ce: + conn.tag_role(RoleName='my-role', Tags=[{'Key': '0', 'Value': '0' * 257}]) + assert 'Member must have length less than or equal to 256.' 
in ce.exception.response['Error']['Message'] + + # With an invalid character: + with assert_raises(ClientError) as ce: + conn.tag_role(RoleName='my-role', Tags=[{'Key': 'NOWAY!', 'Value': ''}]) + assert 'Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+' \ + in ce.exception.response['Error']['Message'] + + # With a role that doesn't exist: + with assert_raises(ClientError): + conn.tag_role(RoleName='notarole', Tags=[{'Key': 'some', 'Value': 'value'}]) + + +@mock_iam +def test_untag_role(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}") + + # With proper tag values: + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': 'somevalue' + }, + { + 'Key': 'someotherkey', + 'Value': 'someothervalue' + } + ]) + + # Remove them: + conn.untag_role(RoleName='my-role', TagKeys=['somekey']) + tags = conn.list_role_tags(RoleName='my-role') + assert len(tags['Tags']) == 1 + assert tags['Tags'][0]['Key'] == 'someotherkey' + assert tags['Tags'][0]['Value'] == 'someothervalue' + + # And again: + conn.untag_role(RoleName='my-role', TagKeys=['someotherkey']) + tags = conn.list_role_tags(RoleName='my-role') + assert not tags['Tags'] + + # Test removing tags with invalid values: + # With more than 50 tags: + with assert_raises(ClientError) as ce: + conn.untag_role(RoleName='my-role', TagKeys=[str(x) for x in range(0, 51)]) + assert 'failed to satisfy constraint: Member must have length less than or equal to 50.' \ + in ce.exception.response['Error']['Message'] + assert 'tagKeys' in ce.exception.response['Error']['Message'] + + # With a really big key: + with assert_raises(ClientError) as ce: + conn.untag_role(RoleName='my-role', TagKeys=['0' * 129]) + assert 'Member must have length less than or equal to 128.' 
in ce.exception.response['Error']['Message'] + assert 'tagKeys' in ce.exception.response['Error']['Message'] + + # With an invalid character: + with assert_raises(ClientError) as ce: + conn.untag_role(RoleName='my-role', TagKeys=['NOWAY!']) + assert 'Member must satisfy regular expression pattern: [\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+' \ + in ce.exception.response['Error']['Message'] + assert 'tagKeys' in ce.exception.response['Error']['Message'] + + # With a role that doesn't exist: + with assert_raises(ClientError): + conn.untag_role(RoleName='notarole', TagKeys=['somevalue']) + + +@mock_iam() +def test_update_role_description(): + conn = boto3.client('iam', region_name='us-east-1') + + with assert_raises(ClientError): + conn.delete_role(RoleName="my-role") + + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") + response = conn.update_role_description(RoleName="my-role", Description="test") + + assert response['Role']['RoleName'] == 'my-role' + +@mock_iam() +def test_update_role(): + conn = boto3.client('iam', region_name='us-east-1') + + with assert_raises(ClientError): + conn.delete_role(RoleName="my-role") + + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") + response = conn.update_role_description(RoleName="my-role", Description="test") + assert response['Role']['RoleName'] == 'my-role' + +@mock_iam() +def test_update_role(): + conn = boto3.client('iam', region_name='us-east-1') + + with assert_raises(ClientError): + conn.delete_role(RoleName="my-role") + + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") + response = conn.update_role(RoleName="my-role", Description="test") + assert len(response.keys()) == 1 + + +@mock_iam() +def test_list_entities_for_policy(): + test_policy = json.dumps({ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "s3:ListBucket", + "Resource": "*", + "Effect": "Allow", + } + ] + }) + + conn = boto3.client('iam', region_name='us-east-1') + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") + conn.create_user(Path='/', UserName='testUser') + conn.create_group(Path='/', GroupName='testGroup') + conn.create_policy( + PolicyName='testPolicy', + Path='/', + PolicyDocument=test_policy, + Description='Test Policy' + ) + + # Attach things to the user and group: + conn.put_user_policy(UserName='testUser', PolicyName='testPolicy', PolicyDocument=test_policy) + conn.put_group_policy(GroupName='testGroup', PolicyName='testPolicy', PolicyDocument=test_policy) + + conn.attach_user_policy(UserName='testUser', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') + conn.attach_group_policy(GroupName='testGroup', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') + + conn.add_user_to_group(UserName='testUser', GroupName='testGroup') + + # Add things to the role: + conn.create_instance_profile(InstanceProfileName='ipn') + conn.add_role_to_instance_profile(InstanceProfileName='ipn', RoleName='my-role') + conn.tag_role(RoleName='my-role', Tags=[ + { + 'Key': 'somekey', + 'Value': 'somevalue' + }, + { + 'Key': 'someotherkey', + 'Value': 'someothervalue' + } + ]) + conn.put_role_policy(RoleName='my-role', PolicyName='test-policy', PolicyDocument=test_policy) + conn.attach_role_policy(RoleName='my-role', PolicyArn='arn:aws:iam::123456789012:policy/testPolicy') + + response = conn.list_entities_for_policy( + PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', + EntityFilter='Role' + ) 
+ assert response['PolicyRoles'] == [{'RoleName': 'my-role'}] + + response = conn.list_entities_for_policy( + PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', + EntityFilter='User', + ) + assert response['PolicyUsers'] == [{'UserName': 'testUser'}] + + response = conn.list_entities_for_policy( + PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', + EntityFilter='Group', + ) + assert response['PolicyGroups'] == [{'GroupName': 'testGroup'}] + + response = conn.list_entities_for_policy( + PolicyArn='arn:aws:iam::123456789012:policy/testPolicy', + EntityFilter='LocalManagedPolicy', + ) + assert response['PolicyGroups'] == [{'GroupName': 'testGroup'}] + assert response['PolicyUsers'] == [{'UserName': 'testUser'}] + assert response['PolicyRoles'] == [{'RoleName': 'my-role'}] + + +@mock_iam() +def test_create_role_no_path(): + conn = boto3.client('iam', region_name='us-east-1') + resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test') + resp.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-role') + resp.get('Role').should_not.have.key('PermissionsBoundary') + +@mock_iam() +def test_create_role_with_permissions_boundary(): + conn = boto3.client('iam', region_name='us-east-1') + boundary = 'arn:aws:iam::123456789012:policy/boundary' + resp = conn.create_role(RoleName='my-role', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=boundary) + expected = { + 'PermissionsBoundaryType': 'PermissionsBoundaryPolicy', + 'PermissionsBoundaryArn': boundary + } + resp.get('Role').get('PermissionsBoundary').should.equal(expected) + + invalid_boundary_arn = 'arn:aws:iam::123456789:not_a_boundary' + with assert_raises(ClientError): + conn.create_role(RoleName='bad-boundary', AssumeRolePolicyDocument='some policy', Description='test', PermissionsBoundary=invalid_boundary_arn) + + # Ensure the PermissionsBoundary is included in role listing as well + conn.list_roles().get('Roles')[0].get('PermissionsBoundary').should.equal(expected) diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py index 49c7987f6..1ca9f2512 100644 --- a/tests/test_iam/test_iam_groups.py +++ b/tests/test_iam/test_iam_groups.py @@ -1,4 +1,7 @@ from __future__ import unicode_literals + +from datetime import datetime + import boto import boto3 import sure # noqa @@ -7,6 +10,18 @@ from nose.tools import assert_raises from boto.exception import BotoServerError from moto import mock_iam, mock_iam_deprecated +MOCK_POLICY = """ +{ + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } +} +""" + @mock_iam_deprecated() def test_create_group(): @@ -25,6 +40,25 @@ def test_get_group(): conn.get_group('not-group') +@mock_iam() +def test_get_group_current(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_group(GroupName='my-group') + result = conn.get_group(GroupName='my-group') + + assert result['Group']['Path'] == '/' + assert result['Group']['GroupName'] == 'my-group' + assert isinstance(result['Group']['CreateDate'], datetime) + assert result['Group']['GroupId'] + assert result['Group']['Arn'] == 'arn:aws:iam::123456789012:group/my-group' + assert not result['Users'] + + # Make a group with a different path: + other_group = conn.create_group(GroupName='my-other-group', Path='some/location') + assert other_group['Group']['Path'] == 'some/location' + assert other_group['Group']['Arn'] == 
'arn:aws:iam::123456789012:group/some/location/my-other-group' + + @mock_iam_deprecated() def test_get_all_groups(): conn = boto.connect_iam() @@ -79,7 +113,7 @@ def test_get_groups_for_user(): def test_put_group_policy(): conn = boto.connect_iam() conn.create_group('my-group') - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') + conn.put_group_policy('my-group', 'my-policy', MOCK_POLICY) @mock_iam @@ -109,7 +143,7 @@ def test_get_group_policy(): with assert_raises(BotoServerError): conn.get_group_policy('my-group', 'my-policy') - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') + conn.put_group_policy('my-group', 'my-policy', MOCK_POLICY) conn.get_group_policy('my-group', 'my-policy') @@ -119,7 +153,7 @@ def test_get_all_group_policies(): conn.create_group('my-group') policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names'] assert policies == [] - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') + conn.put_group_policy('my-group', 'my-policy', MOCK_POLICY) policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names'] assert policies == ['my-policy'] @@ -129,5 +163,5 @@ def test_list_group_policies(): conn = boto3.client('iam', region_name='us-east-1') conn.create_group(GroupName='my-group') conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.be.empty - conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument='{"some": "json"}') + conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument=MOCK_POLICY) conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.equal(['my-policy']) diff --git a/tests/test_iam/test_iam_policies.py b/tests/test_iam/test_iam_policies.py new file mode 100644 index 000000000..e1924a559 --- /dev/null +++ b/tests/test_iam/test_iam_policies.py @@ -0,0 +1,1861 @@ +import json + +import boto3 +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_iam + +invalid_policy_document_test_cases = [ + { + "document": "This is not a json document", + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2008-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2013-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17" + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": ["afd"] + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + "Extra field": "value" + }, + "error_message": 'Syntax errors in policy.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Extra field": "value" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Id": ["cd3a324d2343d942772346-34234234423404-4c2242343242349d1642ee"], + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Id": {}, + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "invalid", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "invalid", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "a a:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Vendor a a is not valid' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:List:Bucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Condition can contain only one colon.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Effect": "Allow", + "Action": "s:3s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + "error_message": 'Actions/Condition can contain only one colon.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "invalid resource" + } + }, + "error_message": 'Resource invalid resource must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "EnableDisableHongKong", + "Effect": "Allow", + "Action": [ + "account:EnableRegion", + "account:DisableRegion" + ], + "Resource": "", + "Condition": { + "StringEquals": {"account:TargetRegion": "ap-east-1"} + } + }, + { + "Sid": "ViewConsole", + "Effect": "Allow", + "Action": [ + "aws-portal:ViewAccount", + "account:ListRegions" + ], + "Resource": "" + } + ] + }, + "error_message": 'Resource must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s:3:ListBucket", + "Resource": "sdfsadf" + } + }, + "error_message": 'Resource sdfsadf must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["adf"] + } + }, + "error_message": 'Resource adf must be in ARN format or "*".' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "" + } + }, + "error_message": 'Resource must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:bsdfdsafsad" + } + }, + "error_message": 'Partition "bsdfdsafsad" is not valid for resource "arn:bsdfdsafsad:*:*:*:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:b:cadfsdf" + } + }, + "error_message": 'Partition "b" is not valid for resource "arn:b:cadfsdf:*:*:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:b:c:d:e:f:g:h" + } + }, + "error_message": 'Partition "b" is not valid for resource "arn:b:c:d:e:f:g:h".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "aws:s3:::example_bucket" + } + }, + "error_message": 'Partition "s3" is not valid for resource "arn:s3:::example_bucket:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:error:s3:::example_bucket", + "arn:error:s3::example_bucket" + ] + } + }, + "error_message": 'Partition "error" is not valid for resource "arn:error:s3:::example_bucket".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [] + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket" + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [] + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "invalid" + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow" + } + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": [], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + "error_message": 'Policy statement must contain actions.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:::example_bucket" + } + }, + "error_message": 'IAM resource path must either be "*" or start with user/, federated-user/, role/, group/, instance-profile/, mfa/, server-certificate/, policy/, sms-mfa/, saml-provider/, oidc-provider/, report/, access-report/.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Resource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws" + } + }, + "error_message": 'Resource vendor must be fully qualified and cannot contain regexes.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": { + "a": "arn:aws:s3:::example_bucket" + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": "s3:ListBucket", + "Resource": ["adfdf", {}] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "NotResource": [] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": [[]], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Action": [], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": {}, + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": [] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": "a" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "a": "b" + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": "b" + } + } + }, + "error_message": 'Syntax errors in policy.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": [] + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": {}} + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": {}} + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "x": { + "a": "1" + } + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "ForAnyValue::StringEqualsIfExists": { + "a": "asf" + } + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": [ + {"ForAllValues:StringEquals": {"aws:TagKeys": "Department"}} + ] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:us-east-1::example_bucket" + } + }, + "error_message": 'IAM resource arn:aws:iam:us-east-1::example_bucket cannot contain region information.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:us-east-1::example_bucket" + } + }, + "error_message": 'Resource arn:aws:s3:us-east-1::example_bucket can not contain region information.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Sid": {}, + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Sid": [], + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'Statement IDs (SID) in a single policy must be unique.' + }, + { + "document": { + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Action": "iam:dsf", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "NotResource": "*" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "denY", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Statement": { + "Effect": "denY", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws::::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "allow", + "Resource": "arn:aws:s3:us-east-1::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "aLLow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "NotResource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "234-13" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.593194+1" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.1999999999+10:59" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + 
"DateLessThan": { + "a": "9223372036854775808" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:error:s3:::example_bucket", + "Condition": { + "DateGreaterThan": { + "a": "sdfdsf" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws::fdsasf" + } + }, + "error_message": 'The policy failed legacy parsing' + } +] + +valid_policy_documents = [ + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::example_bucket" + ] + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "iam: asdf safdsf af ", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::example_bucket", + "*" + ] + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "*", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "service-prefix:action-name", + "Resource": "*", + "Condition": { + "DateGreaterThan": {"aws:CurrentTime": "2017-07-01T00:00:00Z"}, + "DateLessThan": {"aws:CurrentTime": "2017-12-31T23:59:59Z"} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "fsx:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:::user/example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s33:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:fdsasf" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": {} + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": {"ForAllValues:StringEquals": {"aws:TagKeys": "Department"}} + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:cloudwatch:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:ec2:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:invalid-service:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:invalid-service:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": 
"arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"aws:CurrentTime": "2017-07-01T00:00:00Z"}, + "DateLessThan": {"aws:CurrentTime": "2017-12-31T23:59:59Z"} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": []} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "a": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Sid": "dsfsdfsdfsdfsdfsadfsd", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ConsoleDisplay", + "Effect": "Allow", + "Action": [ + "iam:GetRole", + "iam:GetUser", + "iam:ListRoles", + "iam:ListRoleTags", + "iam:ListUsers", + "iam:ListUserTags" + ], + "Resource": "*" + }, + { + "Sid": "AddTag", + "Effect": "Allow", + "Action": [ + "iam:TagUser", + "iam:TagRole" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "aws:RequestTag/CostCenter": [ + "A-123", + "B-456" + ] + }, + "ForAllValues:StringEquals": {"aws:TagKeys": "CostCenter"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": "s3:*", + "NotResource": [ + "arn:aws:s3:::HRBucket/Payroll", + "arn:aws:s3:::HRBucket/Payroll/*" + ] + } + }, + { + "Version": "2012-10-17", + "Id": "sdfsdfsdf", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "aaaaaadsfdsafsadfsadfaaaaa:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3-s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3.s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "NotResource": "*" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": { + "a": "01T" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "x": { + }, + "y": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "StringEqualsIfExists": { + "a": "asf" + } + } + 
} + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "ForAnyValue:StringEqualsIfExists": { + "a": "asf" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2019-07-01T13:20:15Z" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13T21:20:37.593194+00:00" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.593194+23" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThan": { + "a": "-292275054" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowViewAccountInfo", + "Effect": "Allow", + "Action": [ + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary", + "iam:ListVirtualMFADevices" + ], + "Resource": "*" + }, + { + "Sid": "AllowManageOwnPasswords", + "Effect": "Allow", + "Action": [ + "iam:ChangePassword", + "iam:GetUser" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnAccessKeys", + "Effect": "Allow", + "Action": [ + "iam:CreateAccessKey", + "iam:DeleteAccessKey", + "iam:ListAccessKeys", + "iam:UpdateAccessKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSigningCertificates", + "Effect": "Allow", + "Action": [ + "iam:DeleteSigningCertificate", + "iam:ListSigningCertificates", + "iam:UpdateSigningCertificate", + "iam:UploadSigningCertificate" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSSHPublicKeys", + "Effect": "Allow", + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnGitCredentials", + "Effect": "Allow", + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ListServiceSpecificCredentials", + "iam:ResetServiceSpecificCredential", + "iam:UpdateServiceSpecificCredential" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnVirtualMFADevice", + "Effect": "Allow", + "Action": [ + "iam:CreateVirtualMFADevice", + "iam:DeleteVirtualMFADevice" + ], + "Resource": "arn:aws:iam::*:mfa/${aws:username}" + }, + { + "Sid": "AllowManageOwnUserMFA", + "Effect": "Allow", + "Action": [ + "iam:DeactivateMFADevice", + "iam:EnableMFADevice", + "iam:ListMFADevices", + "iam:ResyncMFADevice" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "DenyAllExceptListedIfNoMFA", + "Effect": "Deny", + "NotAction": [ + "iam:CreateVirtualMFADevice", + "iam:EnableMFADevice", + "iam:GetUser", + "iam:ListMFADevices", + "iam:ListVirtualMFADevices", + "iam:ResyncMFADevice", + "sts:GetSessionToken" + ], + "Resource": "*", + "Condition": { + "BoolIfExists": { + "aws:MultiFactorAuthPresent": "false" + } + } + } + ] + }, + { + 
"Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListAndDescribe", + "Effect": "Allow", + "Action": [ + "dynamodb:List*", + "dynamodb:DescribeReservedCapacity*", + "dynamodb:DescribeLimits", + "dynamodb:DescribeTimeToLive" + ], + "Resource": "*" + }, + { + "Sid": "SpecificTable", + "Effect": "Allow", + "Action": [ + "dynamodb:BatchGet*", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:Get*", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:BatchWrite*", + "dynamodb:CreateTable", + "dynamodb:Delete*", + "dynamodb:Update*", + "dynamodb:PutItem" + ], + "Resource": "arn:aws:dynamodb:*:*:table/MyTable" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:instance/*" + ], + "Condition": { + "ArnEquals": {"ec2:SourceInstanceARN": "arn:aws:ec2:*:*:instance/instance-id"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": "arn:aws:ec2:*:*:instance/*", + "Condition": { + "StringEquals": {"ec2:ResourceTag/Department": "Development"} + } + }, + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": "arn:aws:ec2:*:*:volume/*", + "Condition": { + "StringEquals": {"ec2:ResourceTag/VolumeUser": "${aws:username}"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "StartStopIfTags", + "Effect": "Allow", + "Action": [ + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:DescribeTags" + ], + "Resource": "arn:aws:ec2:region:account-id:instance/*", + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Project": "DataAnalytics", + "aws:PrincipalTag/Department": "Data" + } + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListYourObjects", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["arn:aws:s3:::bucket-name"], + "Condition": { + "StringLike": { + "s3:prefix": ["cognito/application-name/${cognito-identity.amazonaws.com:sub}"] + } + } + }, + { + "Sid": "ReadWriteDeleteYourObjects", + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Resource": [ + "arn:aws:s3:::bucket-name/cognito/application-name/${cognito-identity.amazonaws.com:sub}", + "arn:aws:s3:::bucket-name/cognito/application-name/${cognito-identity.amazonaws.com:sub}/*" + ] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:ListAllMyBuckets", + "s3:GetBucketLocation" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::bucket-name", + "Condition": { + "StringLike": { + "s3:prefix": [ + "", + "home/", + "home/${aws:userid}/*" + ] + } + } + }, + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": [ + "arn:aws:s3:::bucket-name/home/${aws:userid}", + "arn:aws:s3:::bucket-name/home/${aws:userid}/*" + ] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ConsoleAccess", + "Effect": "Allow", + "Action": [ + "s3:GetAccountPublicAccessBlock", + "s3:GetBucketAcl", + "s3:GetBucketLocation", + "s3:GetBucketPolicyStatus", + "s3:GetBucketPublicAccessBlock", + "s3:ListAllMyBuckets" + ], + "Resource": "*" + }, + { + "Sid": "ListObjectsInBucket", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["arn:aws:s3:::bucket-name"] + }, + { + "Sid": 
"AllObjectActions", + "Effect": "Allow", + "Action": "s3:*Object", + "Resource": ["arn:aws:s3:::bucket-name/*"] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowViewAccountInfo", + "Effect": "Allow", + "Action": [ + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary" + ], + "Resource": "*" + }, + { + "Sid": "AllowManageOwnPasswords", + "Effect": "Allow", + "Action": [ + "iam:ChangePassword", + "iam:GetUser" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnAccessKeys", + "Effect": "Allow", + "Action": [ + "iam:CreateAccessKey", + "iam:DeleteAccessKey", + "iam:ListAccessKeys", + "iam:UpdateAccessKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSigningCertificates", + "Effect": "Allow", + "Action": [ + "iam:DeleteSigningCertificate", + "iam:ListSigningCertificates", + "iam:UpdateSigningCertificate", + "iam:UploadSigningCertificate" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSSHPublicKeys", + "Effect": "Allow", + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnGitCredentials", + "Effect": "Allow", + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ListServiceSpecificCredentials", + "iam:ResetServiceSpecificCredential", + "iam:UpdateServiceSpecificCredential" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Action": "ec2:*", + "Resource": "*", + "Effect": "Allow", + "Condition": { + "StringEquals": { + "ec2:Region": "region" + } + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "rds:*", + "Resource": ["arn:aws:rds:region:*:*"] + }, + { + "Effect": "Allow", + "Action": ["rds:Describe*"], + "Resource": ["*"] + } + ] + } +] + + +def test_create_policy_with_invalid_policy_documents(): + for test_case in invalid_policy_document_test_cases: + yield check_create_policy_with_invalid_policy_document, test_case + + +def test_create_policy_with_valid_policy_documents(): + for valid_policy_document in valid_policy_documents: + yield check_create_policy_with_valid_policy_document, valid_policy_document + + +@mock_iam +def check_create_policy_with_invalid_policy_document(test_case): + conn = boto3.client('iam', region_name='us-east-1') + with assert_raises(ClientError) as ex: + conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument=json.dumps(test_case["document"])) + ex.exception.response['Error']['Code'].should.equal('MalformedPolicyDocument') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal(test_case["error_message"]) + + +@mock_iam +def check_create_policy_with_valid_policy_document(valid_policy_document): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument=json.dumps(valid_policy_document)) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 31631e459..a580f56d1 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1,8 +1,12 @@ from __future__ import unicode_literals -import boto3 +import json import sure # noqa +import boto3 + from moto import mock_iot 
+from botocore.exceptions import ClientError +from nose.tools import assert_raises @mock_iot @@ -34,12 +38,14 @@ def test_things(): res.should.have.key('things').which.should.have.length_of(1) for thing in res['things']: thing.should.have.key('thingName').which.should_not.be.none + thing.should.have.key('thingArn').which.should_not.be.none thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) res = client.list_things() res.should.have.key('things').which.should.have.length_of(1) for thing in res['things']: thing.should.have.key('thingName').which.should_not.be.none + thing.should.have.key('thingArn').which.should_not.be.none res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') thing = client.describe_thing(thingName=name) @@ -61,8 +67,168 @@ def test_things(): @mock_iot -def test_certs(): +def test_list_thing_types(): client = boto3.client('iot', region_name='ap-northeast-1') + + for i in range(0, 100): + client.create_thing_type(thingTypeName=str(i + 1)) + + thing_types = client.list_thing_types() + thing_types.should.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(50) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('1') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('50') + + thing_types = client.list_thing_types(nextToken=thing_types['nextToken']) + thing_types.should.have.key('thingTypes').which.should.have.length_of(50) + thing_types.should_not.have.key('nextToken') + thing_types['thingTypes'][0]['thingTypeName'].should.equal('51') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('100') + + +@mock_iot +def test_list_thing_types_with_typename_filter(): + client = boto3.client('iot', region_name='ap-northeast-1') + + client.create_thing_type(thingTypeName='thing') + client.create_thing_type(thingTypeName='thingType') + client.create_thing_type(thingTypeName='thingTypeName') + client.create_thing_type(thingTypeName='thingTypeNameGroup') + client.create_thing_type(thingTypeName='shouldNotFind') + client.create_thing_type(thingTypeName='find me it shall not') + + thing_types = client.list_thing_types(thingTypeName='thing') + thing_types.should_not.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(4) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('thing') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') + + thing_types = client.list_thing_types(thingTypeName='thingTypeName') + thing_types.should_not.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(2) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('thingTypeName') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') + + +@mock_iot +def test_list_things_with_next_token(): + client = boto3.client('iot', region_name='ap-northeast-1') + + for i in range(0, 200): + client.create_thing(thingName=str(i + 1)) + + things = client.list_things() + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('1') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/1') + things['things'][-1]['thingName'].should.equal('50') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/50') + + things = client.list_things(nextToken=things['nextToken']) + things.should.have.key('nextToken') 
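+    # Clarifying note: moto's list_things mock pages results 50 at a time, so the 200 things created above come back over four pages, with nextToken present on every page except the last.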
+ things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('51') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/51') + things['things'][-1]['thingName'].should.equal('100') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') + + things = client.list_things(nextToken=things['nextToken']) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('101') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/101') + things['things'][-1]['thingName'].should.equal('150') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') + + things = client.list_things(nextToken=things['nextToken']) + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('151') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/151') + things['things'][-1]['thingName'].should.equal('200') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') + + +@mock_iot +def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): + client = boto3.client('iot', region_name='ap-northeast-1') + client.create_thing_type(thingTypeName='my-thing-type') + + for i in range(0, 200): + if not (i + 1) % 3: + attribute_payload = { + 'attributes': { + 'foo': 'bar' + } + } + elif not (i + 1) % 5: + attribute_payload = { + 'attributes': { + 'bar': 'foo' + } + } + else: + attribute_payload = {} + + if not (i + 1) % 2: + thing_type_name = 'my-thing-type' + client.create_thing(thingName=str(i + 1), thingTypeName=thing_type_name, attributePayload=attribute_payload) + else: + client.create_thing(thingName=str(i + 1), attributePayload=attribute_payload) + + # Test filter for thingTypeName + things = client.list_things(thingTypeName=thing_type_name) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('2') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/2') + things['things'][-1]['thingName'].should.equal('100') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') + assert all(item['thingTypeName'] == thing_type_name for item in things['things']) + + things = client.list_things(nextToken=things['nextToken'], thingTypeName=thing_type_name) + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('102') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/102') + things['things'][-1]['thingName'].should.equal('200') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') + assert all(item['thingTypeName'] == thing_type_name for item in things['things']) + + # Test filter for attributes + things = client.list_things(attributeName='foo', attributeValue='bar') + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('3') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/3') + things['things'][-1]['thingName'].should.equal('150') + 
things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') + assert all(item['attributes'] == {'foo': 'bar'} for item in things['things']) + + things = client.list_things(nextToken=things['nextToken'], attributeName='foo', attributeValue='bar') + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(16) + things['things'][0]['thingName'].should.equal('153') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/153') + things['things'][-1]['thingName'].should.equal('198') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') + assert all(item['attributes'] == {'foo': 'bar'} for item in things['things']) + + # Test filter for attributes and thingTypeName + things = client.list_things(thingTypeName=thing_type_name, attributeName='foo', attributeValue='bar') + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(33) + things['things'][0]['thingName'].should.equal('6') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/6') + things['things'][-1]['thingName'].should.equal('198') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') + assert all(item['attributes'] == {'foo': 'bar'} and item['thingTypeName'] == thing_type_name for item in things['things']) + + +@mock_iot +def test_certs(): + client = boto3.client('iot', region_name='us-east-1') cert = client.create_keys_and_certificate(setAsActive=True) cert.should.have.key('certificateArn').which.should_not.be.none cert.should.have.key('certificateId').which.should_not.be.none @@ -79,6 +245,29 @@ def test_certs(): cert_desc.should.have.key('certificateId').which.should_not.be.none cert_desc.should.have.key('certificatePem').which.should_not.be.none cert_desc.should.have.key('status').which.should.equal('ACTIVE') + cert_pem = cert_desc['certificatePem'] + + res = client.list_certificates() + for cert in res['certificates']: + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('status').which.should_not.be.none + cert.should.have.key('creationDate').which.should_not.be.none + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + cert = client.describe_certificate(certificateId=cert_id) + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates') + + # Test register_certificate flow + cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True) + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('certificateArn').which.should_not.be.none + cert_id = cert['certificateId'] res = client.list_certificates() res.should.have.key('certificates').which.should.have.length_of(1) @@ -90,12 +279,122 @@ def test_certs(): client.update_certificate(certificateId=cert_id, newStatus='REVOKED') cert = client.describe_certificate(certificateId=cert_id) - cert_desc.should.have.key('status').which.should.equal('ACTIVE') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates') + + +@mock_iot +def 
test_delete_policy_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_policy(policyName=policy_name) + e.exception.response['Error']['Message'].should.contain( + 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' % policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_policy(policyName=policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(0) + + +@mock_iot +def test_delete_certificate_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_id = cert['certificateId'] + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + thing_name = 'thing-1' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + client.create_thing(thingName=thing_name) + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate must be deactivated (not ACTIVE) before deletion.') + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Things must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate policies must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) client.delete_certificate(certificateId=cert_id) res = client.list_certificates() res.should.have.key('certificates').which.should.have.length_of(0) + +@mock_iot +def test_certs_create_inactive(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=False) + cert_id = cert['certificateId'] + + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + 
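# Clarifying note: a certificate created with setAsActive=False starts out INACTIVE; the assertions below verify this, and that update_certificate can then flip it to ACTIVE. +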
cert_desc.should.have.key('status').which.should.equal('INACTIVE') + + client.update_certificate(certificateId=cert_id, newStatus='ACTIVE') + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('ACTIVE') + + @mock_iot def test_policy(): client = boto3.client('iot', region_name='ap-northeast-1') @@ -126,6 +425,47 @@ def test_policy(): @mock_iot def test_principal_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + client.create_policy(policyName=policy_name, policyDocument=doc) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + # does nothing if the policy is already attached to the certificate + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_policy(policyName=policy_name, target=cert_arn) + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(0) + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(0) + with assert_raises(ClientError) as e: + client.detach_policy(policyName=policy_name, target=cert_arn) + e.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + + +@mock_iot +def test_principal_policy_deprecated(): client = boto3.client('iot', region_name='ap-northeast-1') policy_name = 'my-policy' doc = '{}' @@ -177,3 +517,324 @@ def test_principal_thing(): res.should.have.key('things').which.should.have.length_of(0) res = client.list_thing_principals(thingName=thing_name) res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_thing_groups(): + client = boto3.client('iot', region_name='ap-northeast-1') + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(1) + for thing_group in res['thingGroups']: + thing_group.should.have.key('groupName').which.should_not.be.none + thing_group.should.have.key('groupArn').which.should_not.be.none + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupProperties') + thing_group.should.have.key('thingGroupMetadata') + 
thing_group.should.have.key('version') + + # delete thing group + client.delete_thing_group(thingGroupName=group_name) + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(0) + + # props create test + props = { + 'thingGroupDescription': 'my first thing group', + 'attributePayload': { + 'attributes': { + 'key1': 'val01', + 'Key02': 'VAL2' + } + } + } + thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + # props update test with merge + new_props = { + 'attributePayload': { + 'attributes': { + 'k3': 'v3' + }, + 'merge': True + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + res_props.should.have.key('k3').which.should.equal('v3') + + # props update test + new_props = { + 'attributePayload': { + 'attributes': { + 'k4': 'v4' + } + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('k4').which.should.equal('v4') + res_props.should_not.have.key('key1') + + +@mock_iot +def test_thing_group_relations(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # add the thing to the group via all 4 valid argument combinations + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(1) + + thing_groups = client.list_thing_groups_for_thing( + thingName=name + ) + 
thing_groups.should.have.key('thingGroups') + thing_groups['thingGroups'].should.have.length_of(1) + + # remove the thing from the group via all 4 valid argument combinations + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(0) + + # update thing group for thing + client.update_thing_groups_for_thing( + thingName=name, + thingGroupsToAdd=[ + group_name + ] + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(1) + + client.update_thing_groups_for_thing( + thingName=name, + thingGroupsToRemove=[ + group_name + ] + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(0) + + +@mock_iot +def test_create_job(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + +@mock_iot +def test_describe_job(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('documentSource') + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobArn") + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key('job').which.should.have.key("targets") + job.should.have.key('job').which.should.have.key("jobProcessDetails") + job.should.have.key('job').which.should.have.key("lastUpdatedAt") + job.should.have.key('job').which.should.have.key("createdAt") + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") + job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") + 
job.should.have.key('job').which.should.have.key("presignedUrlConfig") + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "expiresInSec").which.should.equal(123) + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( + "maximumPerMinute").which.should.equal(10) + + +@mock_iot +def test_describe_job_1(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobArn") + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key('job').which.should.have.key("targets") + job.should.have.key('job').which.should.have.key("jobProcessDetails") + job.should.have.key('job').which.should.have.key("lastUpdatedAt") + job.should.have.key('job').which.should.have.key("createdAt") + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") + job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") + job.should.have.key('job').which.should.have.key("presignedUrlConfig") + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "expiresInSec").which.should.equal(123) + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( + "maximumPerMinute").which.should.equal(10) diff --git a/tests/test_iotdata/test_iotdata.py b/tests/test_iotdata/test_iotdata.py index 5768d31c7..09c1ada4c 100644 --- a/tests/test_iotdata/test_iotdata.py +++ b/tests/test_iotdata/test_iotdata.py @@ -85,3 +85,9 @@ def test_update(): payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') payload.should.have.key('version').which.should.equal(2) payload.should.have.key('timestamp') + + +@mock_iotdata +def test_publish(): + client = boto3.client('iot-data', region_name='ap-northeast-1') + client.publish(topic='test/topic', qos=1, payload=b'') diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index 26a87f35a..e2de866fc 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -1,9 +1,12 @@ from __future__ import unicode_literals +import datetime +import time + import boto.kinesis -from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException import boto3 -import sure # noqa +from 
boto.kinesis.exceptions import ResourceNotFoundException, \ + InvalidArgumentException from moto import mock_kinesis, mock_kinesis_deprecated @@ -12,7 +15,7 @@ from moto import mock_kinesis, mock_kinesis_deprecated def test_create_cluster(): conn = boto.kinesis.connect_to_region("us-west-2") - conn.create_stream("my_stream", 2) + conn.create_stream("my_stream", 3) stream_response = conn.describe_stream("my_stream") @@ -24,7 +27,7 @@ def test_create_cluster(): stream["StreamStatus"].should.equal("ACTIVE") shards = stream['Shards'] - shards.should.have.length_of(2) + shards.should.have.length_of(3) @mock_kinesis_deprecated @@ -71,6 +74,23 @@ def test_list_many_streams(): has_more_streams.should.equal(False) +@mock_kinesis +def test_describe_stream_summary(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = 'my_stream_summary' + shard_count = 5 + conn.create_stream(StreamName=stream_name, ShardCount=shard_count) + + resp = conn.describe_stream_summary(StreamName=stream_name) + stream = resp["StreamDescriptionSummary"] + + stream["StreamName"].should.equal(stream_name) + stream["OpenShardCount"].should.equal(shard_count) + stream["StreamARN"].should.equal( + "arn:aws:kinesis:us-west-2:123456789012:{}".format(stream_name)) + stream["StreamStatus"].should.equal("ACTIVE") + + @mock_kinesis_deprecated def test_basic_shard_iterator(): conn = boto.kinesis.connect_to_region("us-west-2") @@ -87,6 +107,7 @@ def test_basic_shard_iterator(): response = conn.get_records(shard_iterator) shard_iterator = response['NextShardIterator'] response['Records'].should.equal([]) + response['MillisBehindLatest'].should.equal(0) @mock_kinesis_deprecated @@ -97,7 +118,8 @@ def test_get_invalid_shard_iterator(): conn.create_stream(stream_name, 1) conn.get_shard_iterator.when.called_with( - stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) + stream_name, "123", 'TRIM_HORIZON').should.throw( + ResourceNotFoundException) @mock_kinesis_deprecated @@ -223,6 +245,7 @@ def test_get_records_after_sequence_number(): response = conn.get_records(shard_iterator) # And the first result returned should be the third item response['Records'][0]['Data'].should.equal('3') + response['MillisBehindLatest'].should.equal(0) @mock_kinesis_deprecated @@ -260,6 +283,192 @@ def test_get_records_latest(): response['Records'].should.have.length_of(1) response['Records'][0]['PartitionKey'].should.equal('last_record') response['Records'][0]['Data'].should.equal('last_record') + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_at_timestamp(): + # AT_TIMESTAMP - Read the first record at or after the specified timestamp + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + # Create some data + for index in range(1, 5): + conn.put_record(StreamName=stream_name, + Data=str(index), + PartitionKey=str(index)) + + # boto3 floors the timestamp that we pass to get_shard_iterator to + # second precision even though AWS supports ms precision: + # http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html + # To test around this limitation we wait until we are well into the next second + # before capturing the time and storing the records we expect to retrieve.
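+    # Illustrative example (added note, not part of the original test): a client-side timestamp such as 1420070400.123 is serialized as 1420070400, so it can also match records written earlier within that same wall-clock second.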
+ time.sleep(1.0) + timestamp = datetime.datetime.utcnow() + + keys = [str(i) for i in range(5, 10)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(len(keys)) + partition_keys = [r['PartitionKey'] for r in response['Records']] + partition_keys.should.equal(keys) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_at_very_old_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + # Create some data + keys = [str(i) for i in range(1, 5)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=1) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + response['Records'].should.have.length_of(len(keys)) + partition_keys = [r['PartitionKey'] for r in response['Records']] + partition_keys.should.equal(keys) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_timestamp_filtering(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + conn.put_record(StreamName=stream_name, + Data='0', + PartitionKey='0') + + time.sleep(1.0) + timestamp = datetime.datetime.utcnow() + + conn.put_record(StreamName=stream_name, + Data='1', + PartitionKey='1') + + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + response['Records'].should.have.length_of(1) + response['Records'][0]['PartitionKey'].should.equal('1') + response['Records'][0]['ApproximateArrivalTimestamp'].should.be. 
\ + greater_than(timestamp) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_millis_behind_latest(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + conn.put_record(StreamName=stream_name, + Data='0', + PartitionKey='0') + time.sleep(1.0) + conn.put_record(StreamName=stream_name, + Data='1', + PartitionKey='1') + + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator, Limit=1) + response['Records'].should.have.length_of(1) + response['MillisBehindLatest'].should.be.greater_than(0) + + +@mock_kinesis +def test_get_records_at_very_new_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + # Create some data + keys = [str(i) for i in range(1, 5)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + timestamp = datetime.datetime.utcnow() + datetime.timedelta(seconds=1) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(0) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_from_empty_stream_at_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + timestamp = datetime.datetime.utcnow() + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(0) + response['MillisBehindLatest'].should.equal(0) @mock_kinesis_deprecated @@ -271,7 +480,8 @@ def test_invalid_shard_iterator_type(): response = conn.describe_stream(stream_name) shard_id = response['StreamDescription']['Shards'][0]['ShardId'] response = conn.get_shard_iterator.when.called_with( - stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException) + stream_name, shard_id, 'invalid-type').should.throw( + InvalidArgumentException) @mock_kinesis_deprecated @@ -359,7 +569,8 @@ def test_split_shard(): shard_range = shards[0]['HashKeyRange'] new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 + int(shard_range['EndingHashKey']) + int( + shard_range['StartingHashKey'])) // 2 conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) stream_response = conn.describe_stream(stream_name) @@ -372,7 +583,8 @@ def test_split_shard(): shard_range = shards[2]['HashKeyRange'] new_starting_hash = ( - 
int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 + int(shard_range['EndingHashKey']) + int( + shard_range['StartingHashKey'])) // 2 conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) stream_response = conn.describe_stream(stream_name) @@ -402,7 +614,8 @@ def test_merge_shards(): shards.should.have.length_of(4) conn.merge_shards.when.called_with( - stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) + stream_name, 'shardId-000000000000', + 'shardId-000000000002').should.throw(InvalidArgumentException) stream_response = conn.describe_stream(stream_name) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 8d034c7ff..f0d77d3e9 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -1,25 +1,31 @@ from __future__ import unicode_literals -import re - +import os, re import boto3 import boto.kms +import botocore.exceptions from boto.exception import JSONResponseError from boto.kms.exceptions import AlreadyExistsException, NotFoundException + +from moto.kms.exceptions import NotFoundException as MotoNotFoundException import sure # noqa from moto import mock_kms, mock_kms_deprecated from nose.tools import assert_raises +from freezegun import freeze_time +from datetime import datetime +from dateutil.tz import tzutc @mock_kms_deprecated def test_create_key(): conn = boto.kms.connect_to_region("us-west-2") + with freeze_time("2015-01-01 00:00:00"): + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - - key['KeyMetadata']['Description'].should.equal("my key") - key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - key['KeyMetadata']['Enabled'].should.equal(True) + key['KeyMetadata']['Description'].should.equal("my key") + key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + key['KeyMetadata']['Enabled'].should.equal(True) + key['KeyMetadata']['CreationDate'].should.equal("1420070400") @mock_kms_deprecated @@ -125,7 +131,7 @@ def test_enable_key_rotation_via_arn(): def test_enable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.enable_key_rotation.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(NotFoundException) @mock_kms_deprecated @@ -140,7 +146,7 @@ def test_enable_key_rotation_with_alias_name_should_fail(): alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) conn.enable_key_rotation.when.called_with( - 'alias/my-alias').should.throw(JSONResponseError) + 'alias/my-alias').should.throw(NotFoundException) @mock_kms_deprecated @@ -169,6 +175,7 @@ def test_encrypt(): conn = boto.kms.connect_to_region("us-west-2") response = conn.encrypt('key_id', 'encryptme'.encode('utf-8')) response['CiphertextBlob'].should.equal(b'ZW5jcnlwdG1l') + response['KeyId'].should.equal('key_id') @mock_kms_deprecated @@ -182,14 +189,14 @@ def test_decrypt(): def test_disable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.disable_key_rotation.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(NotFoundException) @mock_kms_deprecated def test_get_key_rotation_status_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") conn.get_key_rotation_status.when.called_with( - "not-a-key").should.throw(JSONResponseError) + 
"not-a-key").should.throw(NotFoundException) @mock_kms_deprecated @@ -275,7 +282,7 @@ def test_put_key_policy_via_alias_should_not_update(): target_key_id=key['KeyMetadata']['KeyId']) conn.put_key_policy.when.called_with( - 'alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError) + 'alias/my-key-alias', 'default', 'new policy').should.throw(NotFoundException) policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') policy['Policy'].should.equal('my policy') @@ -491,7 +498,14 @@ def test__delete_alias(): key_id = create_resp['KeyMetadata']['KeyId'] alias = 'alias/my-alias' + # added another alias here to make sure that the deletion of the alias can + # be done when there are multiple existing aliases. + another_create_resp = kms.create_key() + another_key_id = create_resp['KeyMetadata']['KeyId'] + another_alias = 'alias/another-alias' + kms.create_alias(alias, key_id) + kms.create_alias(another_alias, another_key_id) resp = kms.delete_alias(alias) @@ -588,9 +602,9 @@ def test__assert_valid_key_id(): import uuid _assert_valid_key_id.when.called_with( - "not-a-key").should.throw(JSONResponseError) + "not-a-key").should.throw(MotoNotFoundException) _assert_valid_key_id.when.called_with( - str(uuid.uuid4())).should_not.throw(JSONResponseError) + str(uuid.uuid4())).should_not.throw(MotoNotFoundException) @mock_kms_deprecated @@ -598,9 +612,9 @@ def test__assert_default_policy(): from moto.kms.responses import _assert_default_policy _assert_default_policy.when.called_with( - "not-default").should.throw(JSONResponseError) + "not-default").should.throw(MotoNotFoundException) _assert_default_policy.when.called_with( - "default").should_not.throw(JSONResponseError) + "default").should_not.throw(MotoNotFoundException) @mock_kms @@ -610,3 +624,360 @@ def test_kms_encrypt_boto3(): response = client.decrypt(CiphertextBlob=response['CiphertextBlob']) response['Plaintext'].should.equal(b'bar') + + +@mock_kms +def test_disable_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='disable-key') + client.disable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'Disabled' + + +@mock_kms +def test_enable_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='enable-key') + client.disable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + client.enable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == True + assert result["KeyMetadata"]["KeyState"] == 'Enabled' + + +@mock_kms +def test_schedule_key_deletion(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='schedule-key-deletion') + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzutc()) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert 
result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' + assert 'DeletionDate' in result["KeyMetadata"] + + +@mock_kms +def test_schedule_key_deletion_custom(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='schedule-key-deletion') + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'], + PendingWindowInDays=7 + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzutc()) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'], + PendingWindowInDays=7 + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' + assert 'DeletionDate' in result["KeyMetadata"] + + +@mock_kms +def test_cancel_key_deletion(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='cancel-key-deletion') + client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + response = client.cancel_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'Disabled' + assert 'DeletionDate' not in result["KeyMetadata"] + + +@mock_kms +def test_update_key_description(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='old_description') + key_id = key['KeyMetadata']['KeyId'] + + result = client.update_key_description(KeyId=key_id, Description='new_description') + assert 'ResponseMetadata' in result + + +@mock_kms +def test_tag_resource(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='cancel-key-deletion') + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + + keyid = response['KeyId'] + response = client.tag_resource( + KeyId=keyid, + Tags=[ + { + 'TagKey': 'string', + 'TagValue': 'string' + }, + ] + ) + + # Shouldn't have any data, just header + assert len(response.keys()) == 1 + + +@mock_kms +def test_list_resource_tags(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='cancel-key-deletion') + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + + keyid = response['KeyId'] + response = client.tag_resource( + KeyId=keyid, + Tags=[ + { + 'TagKey': 'string', + 'TagValue': 'string' + }, + ] + ) + + response = client.list_resource_tags(KeyId=keyid) + assert response['Tags'][0]['TagKey'] == 'string' + assert response['Tags'][0]['TagValue'] == 'string' + + +@mock_kms +def test_generate_data_key_sizes(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-size') + + resp1 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_256' + ) + resp2 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_128' + ) + resp3 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + NumberOfBytes=64 
+ ) + + assert len(resp1['Plaintext']) == 32 + assert len(resp2['Plaintext']) == 16 + assert len(resp3['Plaintext']) == 64 + + +@mock_kms +def test_generate_data_key_decrypt(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-decrypt') + + resp1 = client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_256' + ) + resp2 = client.decrypt( + CiphertextBlob=resp1['CiphertextBlob'] + ) + + assert resp1['Plaintext'] == resp2['Plaintext'] + + +@mock_kms +def test_generate_data_key_invalid_size_params(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-size') + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_257' + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_128', + NumberOfBytes=16 + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'], + NumberOfBytes=2048 + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + +@mock_kms +def test_generate_data_key_invalid_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-size') + + with assert_raises(client.exceptions.NotFoundException): + client.generate_data_key( + KeyId='alias/randomnonexistantkey', + KeySpec='AES_256' + ) + + with assert_raises(client.exceptions.NotFoundException): + client.generate_data_key( + KeyId=key['KeyMetadata']['KeyId'] + '4', + KeySpec='AES_256' + ) + + +@mock_kms +def test_generate_data_key_without_plaintext_decrypt(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='generate-data-key-decrypt') + + resp1 = client.generate_data_key_without_plaintext( + KeyId=key['KeyMetadata']['KeyId'], + KeySpec='AES_256' + ) + + assert 'Plaintext' not in resp1 + + +@mock_kms +def test_enable_key_rotation_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.enable_key_rotation( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_disable_key_rotation_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.disable_key_rotation( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_enable_key_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.enable_key( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_disable_key_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.disable_key( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_cancel_key_deletion_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.cancel_key_deletion( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_schedule_key_deletion_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with 
assert_raises(client.exceptions.NotFoundException): + client.schedule_key_deletion( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_get_key_rotation_status_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.get_key_rotation_status( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_get_key_policy_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.get_key_policy( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02', + PolicyName='default' + ) + + +@mock_kms +def test_list_key_policies_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.list_key_policies( + KeyId='12366f9b-1230-123d-123e-123e6ae60c02' + ) + + +@mock_kms +def test_put_key_policy_key_not_found(): + client = boto3.client('kms', region_name='us-east-1') + + with assert_raises(client.exceptions.NotFoundException): + client.put_key_policy( + KeyId='00000000-0000-0000-0000-000000000000', + PolicyName='default', + Policy='new policy' + ) diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 392b3f7e9..7048061f0 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,7 +1,10 @@ import boto3 import sure # noqa +import six +from botocore.exceptions import ClientError from moto import mock_logs, settings +from nose.tools import assert_raises _logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' @@ -11,4 +14,151 @@ def test_log_group_create(): conn = boto3.client('logs', 'us-west-2') log_group_name = 'dummy' response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + # AWS defaults to Never Expire for log group retention + assert response['logGroups'][0].get('retentionInDays') == None + response = conn.delete_log_group(logGroupName=log_group_name) + + +@mock_logs +def test_exceptions(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'dummy-stream' + conn.create_log_group(logGroupName=log_group_name) + with assert_raises(ClientError): + conn.create_log_group(logGroupName=log_group_name) + + # error cases for describe_log_groups are not covered yet + + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + with assert_raises(ClientError): + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + with assert_raises(ClientError): + conn.put_log_events( + logGroupName=log_group_name, + logStreamName="invalid-stream", + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + +@mock_logs +def test_put_logs(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + putRes = conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + )
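+    # Hypothetical follow-up call (illustrative only, not exercised in this test): a second put to the same stream would pass the returned token back via boto3's sequenceToken parameter: + # conn.put_log_events(logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=[{'timestamp': 0, 'message': 'again'}], sequenceToken=putRes['nextSequenceToken'])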
+ res = conn.get_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + events = res['events'] + nextSequenceToken = putRes['nextSequenceToken'] + assert isinstance(nextSequenceToken, six.string_types) == True + assert len(nextSequenceToken) == 56 + events.should.have.length_of(2) + + +@mock_logs +def test_filter_logs_interleaved(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.filter_log_events( + logGroupName=log_group_name, + logStreamNames=[log_stream_name], + interleaved=True, + ) + events = res['events'] + for original_message, resulting_event in zip(messages, events): + resulting_event['eventId'].should.equal(str(resulting_event['eventId'])) + resulting_event['timestamp'].should.equal(original_message['timestamp']) + resulting_event['message'].should.equal(original_message['message']) + +@mock_logs +def test_put_retention_policy(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == 7 + + response = conn.delete_log_group(logGroupName=log_group_name) + +@mock_logs +def test_delete_retention_policy(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.put_retention_policy(logGroupName=log_group_name, retentionInDays=7) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == 7 + + response = conn.delete_retention_policy(logGroupName=log_group_name) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + assert response['logGroups'][0].get('retentionInDays') == None + + response = conn.delete_log_group(logGroupName=log_group_name) + diff --git a/tests/test_opsworks/test_apps.py b/tests/test_opsworks/test_apps.py new file mode 100644 index 000000000..37d0f2fe4 --- /dev/null +++ b/tests/test_opsworks/test_apps.py @@ -0,0 +1,102 @@ +from __future__ import unicode_literals +import boto3 +from freezegun import freeze_time +import sure # noqa +import re + +from moto import mock_opsworks + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_create_app_response(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_app( + StackId=stack_id, + Type="other", + Name="TestApp" + ) + + response.should.contain("AppId") + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_app( + 
StackId=second_stack_id, + Type="other", + Name="TestApp" + ) + + response.should.contain("AppId") + + # ClientError + client.create_app.when.called_with( + StackId=stack_id, + Type="other", + Name="TestApp" + ).should.throw( + Exception, re.compile(r'already an app named "TestApp"') + ) + + # ClientError + client.create_app.when.called_with( + StackId="nothere", + Type="other", + Name="TestApp" + ).should.throw( + Exception, "nothere" + ) + +@freeze_time("2015-01-01") +@mock_opsworks +def test_describe_apps(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + app_id = client.create_app( + StackId=stack_id, + Type="other", + Name="TestApp" + )['AppId'] + + rv1 = client.describe_apps(StackId=stack_id) + rv2 = client.describe_apps(AppIds=[app_id]) + rv1['Apps'].should.equal(rv2['Apps']) + + rv1['Apps'][0]['Name'].should.equal("TestApp") + + # ClientError + client.describe_apps.when.called_with( + StackId=stack_id, + AppIds=[app_id] + ).should.throw( + Exception, "Please provide one or more app IDs or a stack ID" + ) + # ClientError + client.describe_apps.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_apps.when.called_with( + AppIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index 9c9e20878..f594a87c8 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -23,6 +23,20 @@ def test_create_instance(): Shortname="TestLayerShortName" )['LayerId'] + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + second_layer_id = client.create_layer( + StackId=second_stack_id, + Type="custom", + Name="SecondTestLayer", + Shortname="SecondTestLayerShortName" + )['LayerId'] + response = client.create_instance( StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" ) @@ -36,6 +50,14 @@ def test_create_instance(): client.create_instance.when.called_with( StackId=stack_id, LayerIds=["nothere"], InstanceType="t2.micro" ).should.throw(Exception, "nothere") + # ClientError + client.create_instance.when.called_with( + StackId=stack_id, LayerIds=[second_layer_id], InstanceType="t2.micro" + ).should.throw(Exception, "Please only provide layer IDs from the same stack") + # ClientError + client.start_instance.when.called_with( + InstanceId="nothere" + ).should.throw(Exception, "Unable to find instance with ID nothere") @mock_opsworks @@ -131,6 +153,32 @@ def test_describe_instances(): response.should.have.length_of(2) S2L1_i1.should_not.be.within([i["InstanceId"] for i in response]) + # ClientError + client.describe_instances.when.called_with( + StackId=S1, + LayerId=S1L1 + ).should.throw( + Exception, "Please provide either one or more" + ) + # ClientError + client.describe_instances.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with( + LayerId="nothere" + ).should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with( + InstanceIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) + @mock_opsworks @mock_ec2 @@ -155,7 +203,7 @@ def 
test_ec2_integration(): )['LayerId'] instance_id = opsworks.create_instance( - StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" + StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro", SshKeyName="testSSH" )['InstanceId'] ec2 = boto3.client('ec2', region_name='us-east-1') diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py index 03224feb0..9c640dfc3 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ -62,6 +62,15 @@ def test_create_layer_response(): Exception, re.compile( r'already a layer with shortname "TestLayerShortName"') ) + # ClientError + client.create_layer.when.called_with( + StackId="nothere", + Type="custom", + Name="TestLayer", + Shortname="_" + ).should.throw( + Exception, "nothere" + ) @freeze_time("2015-01-01") @@ -86,3 +95,23 @@ def test_describe_layers(): rv1['Layers'].should.equal(rv2['Layers']) rv1['Layers'][0]['Name'].should.equal("TestLayer") + + # ClientError + client.describe_layers.when.called_with( + StackId=stack_id, + LayerIds=[layer_id] + ).should.throw( + Exception, "Please provide one or more layer IDs or a stack ID" + ) + # ClientError + client.describe_layers.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_layers.when.called_with( + LayerIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) diff --git a/tests/test_organizations/__init__.py b/tests/test_organizations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py new file mode 100644 index 000000000..36933d41a --- /dev/null +++ b/tests/test_organizations/organizations_test_utils.py @@ -0,0 +1,152 @@ +from __future__ import unicode_literals + +import six +import sure # noqa +import datetime +from moto.organizations import utils + + +def test_make_random_org_id(): + org_id = utils.make_random_org_id() + org_id.should.match(utils.ORG_ID_REGEX) + + +def test_make_random_root_id(): + root_id = utils.make_random_root_id() + root_id.should.match(utils.ROOT_ID_REGEX) + + +def test_make_random_ou_id(): + root_id = utils.make_random_root_id() + ou_id = utils.make_random_ou_id(root_id) + ou_id.should.match(utils.OU_ID_REGEX) + + +def test_make_random_account_id(): + account_id = utils.make_random_account_id() + account_id.should.match(utils.ACCOUNT_ID_REGEX) + + +def test_make_random_create_account_status_id(): + create_account_status_id = utils.make_random_create_account_status_id() + create_account_status_id.should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX) + + +def test_make_random_service_control_policy_id(): + service_control_policy_id = utils.make_random_service_control_policy_id() + service_control_policy_id.should.match(utils.SCP_ID_REGEX) + + +def validate_organization(response): + org = response['Organization'] + sorted(org.keys()).should.equal([ + 'Arn', + 'AvailablePolicyTypes', + 'FeatureSet', + 'Id', + 'MasterAccountArn', + 'MasterAccountEmail', + 'MasterAccountId', + ]) + org['Id'].should.match(utils.ORG_ID_REGEX) + org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) + org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL) + 
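+    # moto seeds every mock organization with fixed MASTER_ACCOUNT_* constants
+    # (see moto.organizations.utils), so exact-equality checks are safe here;
+    # against the real API these values differ per organization.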
org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING']) + org['AvailablePolicyTypes'].should.equal([{ + 'Type': 'SERVICE_CONTROL_POLICY', + 'Status': 'ENABLED' + }]) + + +def validate_roots(org, response): + response.should.have.key('Roots').should.be.a(list) + response['Roots'].should_not.be.empty + root = response['Roots'][0] + root.should.have.key('Id').should.match(utils.ROOT_ID_REGEX) + root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + root['Id'], + )) + root.should.have.key('Name').should.be.a(six.string_types) + root.should.have.key('PolicyTypes').should.be.a(list) + root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') + root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED') + + +def validate_organizational_unit(org, response): + response.should.have.key('OrganizationalUnit').should.be.a(dict) + ou = response['OrganizationalUnit'] + ou.should.have.key('Id').should.match(utils.OU_ID_REGEX) + ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + ou['Id'], + )) + ou.should.have.key('Name').should.be.a(six.string_types) + + +def validate_account(org, account): + sorted(account.keys()).should.equal([ + 'Arn', + 'Email', + 'Id', + 'JoinedMethod', + 'JoinedTimestamp', + 'Name', + 'Status', + ]) + account['Id'].should.match(utils.ACCOUNT_ID_REGEX) + account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + account['Id'], + )) + account['Email'].should.match(utils.EMAIL_REGEX) + account['JoinedMethod'].should.be.within(['INVITED', 'CREATED']) + account['Status'].should.be.within(['ACTIVE', 'SUSPENDED']) + account['Name'].should.be.a(six.string_types) + account['JoinedTimestamp'].should.be.a(datetime.datetime) + + +def validate_create_account_status(create_status): + sorted(create_status.keys()).should.equal([ + 'AccountId', + 'AccountName', + 'CompletedTimestamp', + 'Id', + 'RequestedTimestamp', + 'State', + ]) + create_status['Id'].should.match(utils.CREATE_ACCOUNT_STATUS_ID_REGEX) + create_status['AccountId'].should.match(utils.ACCOUNT_ID_REGEX) + create_status['AccountName'].should.be.a(six.string_types) + create_status['State'].should.equal('SUCCEEDED') + create_status['RequestedTimestamp'].should.be.a(datetime.datetime) + create_status['CompletedTimestamp'].should.be.a(datetime.datetime) + +def validate_policy_summary(org, summary): + summary.should.be.a(dict) + summary.should.have.key('Id').should.match(utils.SCP_ID_REGEX) + summary.should.have.key('Arn').should.equal(utils.SCP_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + summary['Id'], + )) + summary.should.have.key('Name').should.be.a(six.string_types) + summary.should.have.key('Description').should.be.a(six.string_types) + summary.should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') + summary.should.have.key('AwsManaged').should.be.a(bool) + +def validate_service_control_policy(org, response): + response.should.have.key('PolicySummary').should.be.a(dict) + response.should.have.key('Content').should.be.a(six.string_types) + validate_policy_summary(org, response['PolicySummary']) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py new file mode 100644 index 000000000..05f831e62 --- /dev/null +++ b/tests/test_organizations/test_organizations_boto3.py @@ -0,0 +1,594 @@ +from __future__ import unicode_literals + +import boto3 
+import json +import six +import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_organizations +from moto.organizations import utils +from .organizations_test_utils import ( + validate_organization, + validate_roots, + validate_organizational_unit, + validate_account, + validate_create_account_status, + validate_service_control_policy, + validate_policy_summary, +) + + +@mock_organizations +def test_create_organization(): + client = boto3.client('organizations', region_name='us-east-1') + response = client.create_organization(FeatureSet='ALL') + validate_organization(response) + response['Organization']['FeatureSet'].should.equal('ALL') + + +@mock_organizations +def test_describe_organization(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL') + response = client.describe_organization() + validate_organization(response) + + +@mock_organizations +def test_describe_organization_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.describe_organization() + ex = e.exception + ex.operation_name.should.equal('DescribeOrganization') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException') + + +# Organizational Units + +@mock_organizations +def test_list_roots(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + response = client.list_roots() + validate_roots(org, response) + + +@mock_organizations +def test_create_organizational_unit(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_name = 'ou01' + response = client.create_organizational_unit( + ParentId=root_id, + Name=ou_name, + ) + validate_organizational_unit(org, response) + response['OrganizationalUnit']['Name'].should.equal(ou_name) + + +@mock_organizations +def test_describe_organizational_unit(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + response = client.describe_organizational_unit(OrganizationalUnitId=ou_id) + validate_organizational_unit(org, response) + + +@mock_organizations +def test_describe_organizational_unit_exception(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + with assert_raises(ClientError) as e: + response = client.describe_organizational_unit( + OrganizationalUnitId=utils.make_random_root_id() + ) + ex = e.exception + ex.operation_name.should.equal('DescribeOrganizationalUnit') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + + +@mock_organizations +def test_list_organizational_units_for_parent(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + client.create_organizational_unit(ParentId=root_id, Name='ou01') + 
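+    # two more sibling OUs under the same root, so the listing below has to
+    # return several results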
client.create_organizational_unit(ParentId=root_id, Name='ou02') + client.create_organizational_unit(ParentId=root_id, Name='ou03') + response = client.list_organizational_units_for_parent(ParentId=root_id) + response.should.have.key('OrganizationalUnits').should.be.a(list) + for ou in response['OrganizationalUnits']: + validate_organizational_unit(org, dict(OrganizationalUnit=ou)) + + +@mock_organizations +def test_list_organizational_units_for_parent_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.list_organizational_units_for_parent( + ParentId=utils.make_random_root_id() + ) + ex = e.exception + ex.operation_name.should.equal('ListOrganizationalUnitsForParent') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('ParentNotFoundException') + + +# Accounts +mockname = 'mock-account' +mockdomain = 'moto-example.org' +mockemail = '@'.join([mockname, mockdomain]) + + +@mock_organizations +def test_create_account(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL') + create_status = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus'] + validate_create_account_status(create_status) + create_status['AccountName'].should.equal(mockname) + + +@mock_organizations +def test_describe_account(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + account_id = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus']['AccountId'] + response = client.describe_account(AccountId=account_id) + validate_account(org, response['Account']) + response['Account']['Name'].should.equal(mockname) + response['Account']['Email'].should.equal(mockemail) + + +@mock_organizations +def test_describe_account_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.describe_account(AccountId=utils.make_random_account_id()) + ex = e.exception + ex.operation_name.should.equal('DescribeAccount') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + + +@mock_organizations +def test_list_accounts(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + for i in range(5): + name = mockname + str(i) + email = name + '@' + mockdomain + client.create_account(AccountName=name, Email=email) + response = client.list_accounts() + response.should.have.key('Accounts') + accounts = response['Accounts'] + len(accounts).should.equal(5) + for account in accounts: + validate_account(org, account) + accounts[3]['Name'].should.equal(mockname + '3') + accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain) + + +@mock_organizations +def test_list_accounts_for_parent(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + response = client.list_accounts_for_parent(ParentId=root_id) + account_id.should.be.within([account['Id'] for account in response['Accounts']]) + + +@mock_organizations +def 
test_move_account(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + account_id = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus']['AccountId'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + client.move_account( + AccountId=account_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response = client.list_accounts_for_parent(ParentId=ou01_id) + account_id.should.be.within([account['Id'] for account in response['Accounts']]) + + +@mock_organizations +def test_list_parents_for_ou(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + response01 = client.list_parents(ChildId=ou01_id) + response01.should.have.key('Parents').should.be.a(list) + response01['Parents'][0].should.have.key('Id').should.equal(root_id) + response01['Parents'][0].should.have.key('Type').should.equal('ROOT') + ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') + ou02_id = ou02['OrganizationalUnit']['Id'] + response02 = client.list_parents(ChildId=ou02_id) + response02.should.have.key('Parents').should.be.a(list) + response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) + response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_parents_for_accounts(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + account01_id = client.create_account( + AccountName='account01', + Email='account01@moto-example.org' + )['CreateAccountStatus']['AccountId'] + account02_id = client.create_account( + AccountName='account02', + Email='account02@moto-example.org' + )['CreateAccountStatus']['AccountId'] + client.move_account( + AccountId=account02_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response01 = client.list_parents(ChildId=account01_id) + response01.should.have.key('Parents').should.be.a(list) + response01['Parents'][0].should.have.key('Id').should.equal(root_id) + response01['Parents'][0].should.have.key('Type').should.equal('ROOT') + response02 = client.list_parents(ChildId=account02_id) + response02.should.have.key('Parents').should.be.a(list) + response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) + response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_children(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') + ou02_id = ou02['OrganizationalUnit']['Id'] + account01_id = client.create_account( + AccountName='account01', + 
Email='account01@moto-example.org' + )['CreateAccountStatus']['AccountId'] + account02_id = client.create_account( + AccountName='account02', + Email='account02@moto-example.org' + )['CreateAccountStatus']['AccountId'] + client.move_account( + AccountId=account02_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT') + response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT') + response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT') + response04 = client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT') + response01['Children'][0]['Id'].should.equal(account01_id) + response01['Children'][0]['Type'].should.equal('ACCOUNT') + response02['Children'][0]['Id'].should.equal(ou01_id) + response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') + response03['Children'][0]['Id'].should.equal(account02_id) + response03['Children'][0]['Type'].should.equal('ACCOUNT') + response04['Children'][0]['Id'].should.equal(ou02_id) + response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_children_exception(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + with assert_raises(ClientError) as e: + response = client.list_children( + ParentId=utils.make_random_root_id(), + ChildType='ACCOUNT' + ) + ex = e.exception + ex.operation_name.should.equal('ListChildren') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('ParentNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_children( + ParentId=root_id, + ChildType='BLEE' + ) + ex = e.exception + ex.operation_name.should.equal('ListChildren') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +# Service Control Policies +policy_doc01 = dict( + Version='2012-10-17', + Statement=[dict( + Sid='MockPolicyStatement', + Effect='Allow', + Action='s3:*', + Resource='*', + )] +) + +@mock_organizations +def test_create_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + policy = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy'] + validate_service_control_policy(org, policy) + policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy') + policy['PolicySummary']['Description'].should.equal('A dummy service control policy') + policy['Content'].should.equal(json.dumps(policy_doc01)) + + +@mock_organizations +def test_describe_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + policy = client.describe_policy(PolicyId=policy_id)['Policy'] + validate_service_control_policy(org, policy) + policy['PolicySummary']['Name'].should.equal('MockServiceControlPolicy') + policy['PolicySummary']['Description'].should.equal('A dummy service 
control policy') + policy['Content'].should.equal(json.dumps(policy_doc01)) + + +@mock_organizations +def test_describe_policy_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = 'p-47fhe9s3' + with assert_raises(ClientError) as e: + response = client.describe_policy(PolicyId=policy_id) + ex = e.exception + ex.operation_name.should.equal('DescribePolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('PolicyNotFoundException') + with assert_raises(ClientError) as e: + response = client.describe_policy(PolicyId='meaninglessstring') + ex = e.exception + ex.operation_name.should.equal('DescribePolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +@mock_organizations +def test_attach_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_organizations +def test_attach_policy_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + root_id='r-dj873' + ou_id='ou-gi99-i7r8eh2i2' + account_id='126644886543' + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId=root_id) + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, TargetId=account_id) + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + with assert_raises(ClientError) as e: + response = client.attach_policy(PolicyId=policy_id, 
TargetId='meaninglessstring') + ex = e.exception + ex.operation_name.should.equal('AttachPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +@mock_organizations +def test_list_policies(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + for i in range(4): + client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy' + str(i), + Type='SERVICE_CONTROL_POLICY' + ) + response = client.list_policies(Filter='SERVICE_CONTROL_POLICY') + for policy in response['Policies']: + validate_policy_summary(org, policy) + + +@mock_organizations +def test_list_policies_for_target(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + response = client.list_policies_for_target( + TargetId=ou_id, + Filter='SERVICE_CONTROL_POLICY', + ) + for policy in response['Policies']: + validate_policy_summary(org, policy) + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response = client.list_policies_for_target( + TargetId=account_id, + Filter='SERVICE_CONTROL_POLICY', + ) + for policy in response['Policies']: + validate_policy_summary(org, policy) + + +@mock_organizations +def test_list_policies_for_target_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + ou_id='ou-gi99-i7r8eh2i2' + account_id='126644886543' + with assert_raises(ClientError) as e: + response = client.list_policies_for_target( + TargetId=ou_id, + Filter='SERVICE_CONTROL_POLICY', + ) + ex = e.exception + ex.operation_name.should.equal('ListPoliciesForTarget') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_policies_for_target( + TargetId=account_id, + Filter='SERVICE_CONTROL_POLICY', + ) + ex = e.exception + ex.operation_name.should.equal('ListPoliciesForTarget') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_policies_for_target( + TargetId='meaninglessstring', + Filter='SERVICE_CONTROL_POLICY', + ) + ex = e.exception + ex.operation_name.should.equal('ListPoliciesForTarget') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + + +@mock_organizations +def test_list_targets_for_policy(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id =
client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + policy_id = client.create_policy( + Content=json.dumps(policy_doc01), + Description='A dummy service control policy', + Name='MockServiceControlPolicy', + Type='SERVICE_CONTROL_POLICY' + )['Policy']['PolicySummary']['Id'] + client.attach_policy(PolicyId=policy_id, TargetId=root_id) + client.attach_policy(PolicyId=policy_id, TargetId=ou_id) + client.attach_policy(PolicyId=policy_id, TargetId=account_id) + response = client.list_targets_for_policy(PolicyId=policy_id) + for target in response['Targets']: + target.should.be.a(dict) + target.should.have.key('Name').should.be.a(six.string_types) + target.should.have.key('Arn').should.be.a(six.string_types) + target.should.have.key('TargetId').should.be.a(six.string_types) + target.should.have.key('Type').should.be.within( + ['ROOT', 'ORGANIZATIONAL_UNIT', 'ACCOUNT'] + ) + + +@mock_organizations +def test_list_targets_for_policy_exception(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL')['Organization'] + policy_id = 'p-47fhe9s3' + with assert_raises(ClientError) as e: + response = client.list_targets_for_policy(PolicyId=policy_id) + ex = e.exception + ex.operation_name.should.equal('ListTargetsForPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('PolicyNotFoundException') + with assert_raises(ClientError) as e: + response = client.list_targets_for_policy(PolicyId='meaninglessstring') + ex = e.exception + ex.operation_name.should.equal('ListTargetsForPolicy') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('InvalidInputException') + diff --git a/tests/test_packages/__init__.py b/tests/test_packages/__init__.py new file mode 100644 index 000000000..bf582e0b3 --- /dev/null +++ b/tests/test_packages/__init__.py @@ -0,0 +1,8 @@ +from __future__ import unicode_literals + +import logging +# Disable extra logging for tests +logging.getLogger('boto').setLevel(logging.CRITICAL) +logging.getLogger('boto3').setLevel(logging.CRITICAL) +logging.getLogger('botocore').setLevel(logging.CRITICAL) +logging.getLogger('nose').setLevel(logging.CRITICAL) diff --git a/tests/test_packages/test_httpretty.py b/tests/test_packages/test_httpretty.py new file mode 100644 index 000000000..48277a2de --- /dev/null +++ b/tests/test_packages/test_httpretty.py @@ -0,0 +1,37 @@ +# #!/usr/bin/env python +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +import mock + +from moto.packages.httpretty.core import HTTPrettyRequest, fake_gethostname, fake_gethostbyname + + +def test_parse_querystring(): + + core = HTTPrettyRequest(headers='test test HTTP/1.1') + + qs = 'test test' + response = core.parse_querystring(qs) + + assert response == {} + +def test_parse_request_body(): + core = HTTPrettyRequest(headers='test test HTTP/1.1') + + qs = 'test' + response = core.parse_request_body(qs) + + assert response == 'test' + +def test_fake_gethostname(): + + response = fake_gethostname() + + assert response == 'localhost' + +def test_fake_gethostbyname(): + + host = 'test' + response = fake_gethostbyname(host=host) + + assert response == '127.0.0.1' \ No newline at end of file diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 5bf733dc6..af330e672 100644 --- 
a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -174,8 +174,8 @@ def test_add_security_group_to_database(): def test_add_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24") + subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.2.0/24") subnet_ids = [subnet1.id, subnet2.id] conn = boto.rds.connect_to_region("us-west-2") @@ -191,7 +191,7 @@ def test_add_database_subnet_group(): def test_describe_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") conn = boto.rds.connect_to_region("us-west-2") conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) @@ -209,7 +209,7 @@ def test_describe_database_subnet_group(): def test_delete_database_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") conn = boto.rds.connect_to_region("us-west-2") conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) @@ -227,7 +227,7 @@ def test_delete_database_subnet_group(): def test_create_database_in_subnet_group(): vpc_conn = boto.vpc.connect_to_region("us-west-2") vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") conn = boto.rds.connect_to_region("us-west-2") conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 4ab7dbc60..aacaf04f1 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -18,18 +18,57 @@ def test_create_database(): MasterUsername='root', MasterUserPassword='hunter2', Port=1234, - DBSecurityGroups=["my_sg"]) - database['DBInstance']['AllocatedStorage'].should.equal(10) - database['DBInstance']['DBInstanceClass'].should.equal("db.m1.small") - database['DBInstance']['LicenseModel'].should.equal("license-included") - database['DBInstance']['MasterUsername'].should.equal("root") - database['DBInstance']['DBSecurityGroups'][0][ + DBSecurityGroups=["my_sg"], + VpcSecurityGroupIds=['sg-123456']) + db_instance = database['DBInstance'] + db_instance['AllocatedStorage'].should.equal(10) + db_instance['DBInstanceClass'].should.equal("db.m1.small") + db_instance['LicenseModel'].should.equal("license-included") + db_instance['MasterUsername'].should.equal("root") + db_instance['DBSecurityGroups'][0][ 'DBSecurityGroupName'].should.equal('my_sg') - database['DBInstance']['DBInstanceArn'].should.equal( + db_instance['DBInstanceArn'].should.equal( 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') - database['DBInstance']['DBInstanceStatus'].should.equal('available') - database['DBInstance']['DBName'].should.equal('staging-postgres') - database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") + db_instance['DBInstanceStatus'].should.equal('available') + db_instance['DBName'].should.equal('staging-postgres') + db_instance['DBInstanceIdentifier'].should.equal("db-master-1") + db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False) + 
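+    # DbiResourceId is an opaque generated identifier; only its 'db-' prefix
+    # is stable, hence the contain() check below rather than exact equality.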
db_instance['DbiResourceId'].should.contain("db-") + db_instance['CopyTagsToSnapshot'].should.equal(False) + db_instance['InstanceCreateTime'].should.be.a("datetime.datetime") + db_instance['VpcSecurityGroups'][0]['VpcSecurityGroupId'].should.equal('sg-123456') + + +@mock_rds2 +def test_create_database_non_existing_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance.when.called_with( + DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + OptionGroupName='non-existing').should.throw(ClientError) + + +@mock_rds2 +def test_create_database_with_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='my-og', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + OptionGroupName='my-og') + db_instance = database['DBInstance'] + db_instance['AllocatedStorage'].should.equal(10) + db_instance['DBInstanceClass'].should.equal('db.m1.small') + db_instance['DBName'].should.equal('staging-postgres') + db_instance['OptionGroupMemberships'][0]['OptionGroupName'].should.equal('my-og') @mock_rds2 @@ -50,7 +89,7 @@ def test_stop_database(): # test stopping database should shutdown response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') # test rdsclient error when trying to stop an already stopped database conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) # test stopping a stopped database with snapshot should error and no snapshot should exist for that call @@ -76,10 +115,10 @@ def test_start_database(): mydb['DBInstanceStatus'].should.equal('available') # test starting an already started database should error conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # stop and test start - should go from shutdown to available, create snapshot and check snapshot + # stop and test start - should go from stopped to available, create snapshot and check snapshot response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap') response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') response = conn.describe_db_snapshots() response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') response = conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) @@ -93,7 +132,7 @@ def test_start_database(): # test stopping database not invoking snapshot should succeed. 
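+    # (real RDS reports a stopped instance as 'stopped'; the assertions below
+    # were updated from moto's old 'shutdown' status to match)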
response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') @mock_rds2 @@ -197,6 +236,9 @@ def test_get_databases_paginated(): resp2 = conn.describe_db_instances(Marker=resp["Marker"]) resp2["DBInstances"].should.have.length_of(1) + resp3 = conn.describe_db_instances(MaxRecords=100) + resp3["DBInstances"].should.have.length_of(51) + @mock_rds2 def test_describe_non_existant_database(): @@ -220,9 +262,33 @@ def test_modify_db_instance(): instances['DBInstances'][0]['AllocatedStorage'].should.equal(10) conn.modify_db_instance(DBInstanceIdentifier='db-master-1', AllocatedStorage=20, - ApplyImmediately=True) + ApplyImmediately=True, + VpcSecurityGroupIds=['sg-123456']) instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') instances['DBInstances'][0]['AllocatedStorage'].should.equal(20) + instances['DBInstances'][0]['VpcSecurityGroups'][0]['VpcSecurityGroupId'].should.equal('sg-123456') + + +@mock_rds2 +def test_rename_db_instance(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='db.m1.small', + Engine='postgres', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + list(instances['DBInstances']).should.have.length_of(1) + conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-2").should.throw(ClientError) + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + NewDBInstanceIdentifier='db-master-2', + ApplyImmediately=True) + conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-1").should.throw(ClientError) + instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2") + list(instances['DBInstances']).should.have.length_of(1) @mock_rds2 @@ -312,6 +378,49 @@ def test_create_db_snapshots(): snapshot.get('Engine').should.equal('postgres') snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') snapshot.get('DBSnapshotIdentifier').should.equal('g-1') + result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) + result['TagList'].should.equal([]) + + +@mock_rds2 +def test_create_db_snapshots_copy_tags(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_snapshot.when.called_with( + DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + CopyTagsToSnapshot=True, + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='g-1').get('DBSnapshot') + + snapshot.get('Engine').should.equal('postgres') + snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') + snapshot.get('DBSnapshotIdentifier').should.equal('g-1') + result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) + result['TagList'].should.equal([{'Value': 'bar',
'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) @mock_rds2 @@ -326,8 +435,6 @@ def test_describe_db_snapshots(): MasterUserPassword='hunter2', Port=1234, DBSecurityGroups=["my_sg"]) - conn.describe_db_snapshots.when.called_with( - DBInstanceIdentifier="db-primary-1").should.throw(ClientError) created = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', DBSnapshotIdentifier='snapshot-1').get('DBSnapshot') @@ -342,6 +449,11 @@ def test_describe_db_snapshots(): snapshot.should.equal(created) snapshot.get('Engine').should.equal('postgres') + conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-2') + snapshots = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') + snapshots.should.have.length_of(2) + @mock_rds2 def test_delete_db_snapshot(): @@ -626,6 +738,117 @@ def test_remove_tags_db(): len(result['TagList']).should.equal(1) +@mock_rds2 +def test_list_tags_snapshot(): + conn = boto3.client('rds', region_name='us-west-2') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:foo') + result['TagList'].should.equal([]) + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-with-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshot']['DBSnapshotArn']) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_rds2 +def test_add_tags_snapshot(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-without-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') + list(result['TagList']).should.have.length_of(2) + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }, + ]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') + list(result['TagList']).should.have.length_of(3) + + +@mock_rds2 +def test_remove_tags_snapshot(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-with-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 
'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') + list(result['TagList']).should.have.length_of(2) + conn.remove_tags_from_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags', TagKeys=['foo']) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') + len(result['TagList']).should.equal(1) + + @mock_rds2 def test_add_tags_option_group(): conn = boto3.client('rds', region_name='us-west-2') @@ -860,9 +1083,9 @@ def test_create_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet1 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] subnet2 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.2.0/24')['Subnet'] subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] conn = boto3.client('rds', region_name='us-west-2') @@ -884,7 +1107,7 @@ def test_create_database_in_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', @@ -909,7 +1132,7 @@ def test_describe_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", @@ -938,7 +1161,7 @@ def test_delete_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -964,7 +1187,7 @@ def test_list_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -991,7 +1214,7 @@ def test_add_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() @@ -1022,7 +1245,7 @@ def test_remove_tags_database_subnet_group(): vpc_conn = boto3.client('ec2', 'us-west-2') vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + VpcId=vpc['VpcId'], 
CidrBlock='10.0.1.0/24')['Subnet'] conn = boto3.client('rds', region_name='us-west-2') result = conn.describe_db_subnet_groups() diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index cebaa3ec7..541614788 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -1,5 +1,7 @@ from __future__ import unicode_literals +import datetime + import boto import boto3 from boto.redshift.exceptions import ( @@ -7,7 +9,7 @@ from boto.redshift.exceptions import ( ClusterParameterGroupNotFound, ClusterSecurityGroupNotFound, ClusterSubnetGroupNotFound, - InvalidSubnet, + InvalidSubnet ) from botocore.exceptions import ( ClientError @@ -32,6 +34,47 @@ def test_create_cluster_boto3(): MasterUserPassword='password', ) response['Cluster']['NodeType'].should.equal('ds2.xlarge') + create_time = response['Cluster']['ClusterCreateTime'] + create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo)) + + +@mock_redshift +def test_create_snapshot_copy_grant(): + client = boto3.client('redshift', region_name='us-east-1') + grants = client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + KmsKeyId='fake', + ) + grants['SnapshotCopyGrant']['SnapshotCopyGrantName'].should.equal('test-us-east-1') + grants['SnapshotCopyGrant']['KmsKeyId'].should.equal('fake') + + client.delete_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + ) + + client.describe_snapshot_copy_grants.when.called_with( + SnapshotCopyGrantName='test-us-east-1', + ).should.throw(Exception) + + +@mock_redshift +def test_create_many_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + + for i in range(10): + client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1-{0}'.format(i), + KmsKeyId='fake', + ) + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(10) + + +@mock_redshift +def test_no_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(0) @mock_redshift_deprecated @@ -134,30 +177,29 @@ def test_default_cluster_attributes(): cluster['NumberOfNodes'].should.equal(1) -@mock_redshift_deprecated -@mock_ec2_deprecated +@mock_redshift +@mock_ec2 def test_create_cluster_in_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() - redshift_conn.create_cluster_subnet_group( - "my_subnet_group", - "This is my subnet group", - subnet_ids=[subnet.id], + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_subnet_group( + ClusterSubnetGroupName="my_subnet_group", + Description="This is my subnet group", + SubnetIds=[subnet.id], ) - redshift_conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_subnet_group_name='my_subnet_group', + client.create_cluster( + ClusterIdentifier="my_cluster", + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSubnetGroupName='my_subnet_group', ) - cluster_response = redshift_conn.describe_clusters("my_cluster") - 
cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] + cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") + cluster = cluster_response['Clusters'][0] cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') @@ -294,6 +336,24 @@ def test_create_cluster_with_vpc_security_groups_boto3(): list(group_ids).should.equal([security_group.id]) +@mock_redshift +def test_create_cluster_with_iam_roles(): + iam_roles_arn = ['arn:aws:iam:::role/my-iam-role', ] + client = boto3.client('redshift', region_name='us-east-1') + cluster_id = 'my_cluster' + client.create_cluster( + ClusterIdentifier=cluster_id, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + IamRoles=iam_roles_arn + ) + response = client.describe_clusters(ClusterIdentifier=cluster_id) + cluster = response['Clusters'][0] + iam_roles = [role['IamRoleArn'] for role in cluster['IamRoles']] + iam_roles_arn.should.equal(iam_roles) + + @mock_redshift_deprecated def test_create_cluster_with_parameter_group(): conn = boto.connect_redshift() @@ -324,29 +384,41 @@ def test_describe_non_existent_cluster(): conn.describe_clusters.when.called_with( "not-a-cluster").should.throw(ClusterNotFound) - @mock_redshift_deprecated def test_delete_cluster(): conn = boto.connect_redshift() - cluster_identifier = 'my_cluster' + cluster_identifier = "my_cluster" + snapshot_identifier = "my_snapshot" conn.create_cluster( cluster_identifier, - node_type='single-node', + node_type="single-node", master_username="username", master_user_password="password", ) + conn.delete_cluster.when.called_with(cluster_identifier, False).should.throw(AttributeError) + clusters = conn.describe_clusters()['DescribeClustersResponse'][ 'DescribeClustersResult']['Clusters'] list(clusters).should.have.length_of(1) - conn.delete_cluster(cluster_identifier) + conn.delete_cluster( + cluster_identifier=cluster_identifier, + skip_final_cluster_snapshot=False, + final_cluster_snapshot_identifier=snapshot_identifier + ) clusters = conn.describe_clusters()['DescribeClustersResponse'][ 'DescribeClustersResult']['Clusters'] list(clusters).should.have.length_of(0) + snapshots = conn.describe_cluster_snapshots()["DescribeClusterSnapshotsResponse"][ + "DescribeClusterSnapshotsResult"]["Snapshots"] + list(snapshots).should.have.length_of(1) + + assert snapshot_identifier in snapshots[0]["SnapshotIdentifier"] + # Delete invalid id conn.delete_cluster.when.called_with( "not-a-cluster").should.throw(ClusterNotFound) @@ -403,28 +475,26 @@ def test_modify_cluster(): cluster['NumberOfNodes'].should.equal(1) -@mock_redshift_deprecated -@mock_ec2_deprecated +@mock_redshift +@mock_ec2 def test_create_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet1 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + subnet2 = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.1.0/24") + client = boto3.client('redshift', region_name='us-east-1') - redshift_conn = boto.connect_redshift() - - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet1.id, subnet2.id], + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', 
+ SubnetIds=[subnet1.id, subnet2.id], ) - subnets_response = redshift_conn.describe_cluster_subnet_groups( - "my_subnet") - my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] + subnets_response = client.describe_cluster_subnet_groups( + ClusterSubnetGroupName="my_subnet_group") + my_subnet = subnets_response['ClusterSubnetGroups'][0] - my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet") + my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet_group") my_subnet['Description'].should.equal("This is my subnet group") subnet_ids = [subnet['SubnetIdentifier'] for subnet in my_subnet['Subnets']] @@ -449,35 +519,33 @@ def test_describe_non_existent_subnet_group(): "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) -@mock_redshift_deprecated -@mock_ec2_deprecated +@mock_redshift +@mock_ec2 def test_delete_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock="10.0.0.0/16") + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock="10.0.0.0/24") + client = boto3.client('redshift', region_name='us-east-1') - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet.id], + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id], ) - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets_response = client.describe_cluster_subnet_groups() + subnets = subnets_response['ClusterSubnetGroups'] subnets.should.have.length_of(1) - redshift_conn.delete_cluster_subnet_group("my_subnet") + client.delete_cluster_subnet_group(ClusterSubnetGroupName="my_subnet_group") - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets_response = client.describe_cluster_subnet_groups() + subnets = subnets_response['ClusterSubnetGroups'] subnets.should.have.length_of(0) # Delete invalid id - redshift_conn.delete_cluster_subnet_group.when.called_with( - "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + client.delete_cluster_subnet_group.when.called_with( + ClusterSubnetGroupName="not-a-subnet-group").should.throw(ClientError) @mock_redshift_deprecated @@ -582,7 +650,6 @@ def test_delete_cluster_parameter_group(): "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) - @mock_redshift def test_create_cluster_snapshot_of_non_existent_cluster(): client = boto3.client('redshift', region_name='us-east-1') @@ -627,7 +694,8 @@ def test_create_cluster_snapshot(): def test_describe_cluster_snapshots(): client = boto3.client('redshift', region_name='us-east-1') cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' + snapshot_identifier_1 = 'my_snapshot_1' + snapshot_identifier_2 = 'my_snapshot_2' client.create_cluster( DBName='test-db', @@ -639,19 +707,33 @@ def test_describe_cluster_snapshots(): ) client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, + SnapshotIdentifier=snapshot_identifier_1, + 
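+        # Two snapshots of the same cluster let the test exercise both the
+        # per-snapshot and the per-cluster describe filters below.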
ClusterIdentifier=cluster_identifier, + ) + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier_2, ClusterIdentifier=cluster_identifier, ) + resp_snap_1 = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier_1) + snapshot_1 = resp_snap_1['Snapshots'][0] + snapshot_1['SnapshotIdentifier'].should.equal(snapshot_identifier_1) + snapshot_1['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot_1['NumberOfNodes'].should.equal(1) + snapshot_1['NodeType'].should.equal('ds2.xlarge') + snapshot_1['MasterUsername'].should.equal('username') + + resp_snap_2 = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier_2) + snapshot_2 = resp_snap_2['Snapshots'][0] + snapshot_2['SnapshotIdentifier'].should.equal(snapshot_identifier_2) + snapshot_2['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot_2['NumberOfNodes'].should.equal(1) + snapshot_2['NodeType'].should.equal('ds2.xlarge') + snapshot_2['MasterUsername'].should.equal('username') + resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) - resp_snap = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier) - resp_clust['Snapshots'][0].should.equal(resp_snap['Snapshots'][0]) - snapshot = resp_snap['Snapshots'][0] - snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) - snapshot['ClusterIdentifier'].should.equal(cluster_identifier) - snapshot['NumberOfNodes'].should.equal(1) - snapshot['NodeType'].should.equal('ds2.xlarge') - snapshot['MasterUsername'].should.equal('username') + resp_clust['Snapshots'][0].should.equal(resp_snap_1['Snapshots'][0]) + resp_clust['Snapshots'][1].should.equal(resp_snap_2['Snapshots'][0]) @mock_redshift @@ -761,6 +843,48 @@ def test_create_cluster_from_snapshot(): new_cluster['Endpoint']['Port'].should.equal(1234) +@mock_redshift +def test_create_cluster_from_snapshot_with_waiter(): + client = boto3.client('redshift', region_name='us-east-1') + original_cluster_identifier = 'original-cluster' + original_snapshot_identifier = 'original-snapshot' + new_cluster_identifier = 'new-cluster' + + client.create_cluster( + ClusterIdentifier=original_cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=original_snapshot_identifier, + ClusterIdentifier=original_cluster_identifier + ) + response = client.restore_from_cluster_snapshot( + ClusterIdentifier=new_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + Port=1234 + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + client.get_waiter('cluster_restored').wait( + ClusterIdentifier=new_cluster_identifier, + WaiterConfig={ + 'Delay': 1, + 'MaxAttempts': 2, + } + ) + + response = client.describe_clusters( + ClusterIdentifier=new_cluster_identifier + ) + new_cluster = response['Clusters'][0] + new_cluster['NodeType'].should.equal('ds2.xlarge') + new_cluster['MasterUsername'].should.equal('username') + new_cluster['Endpoint']['Port'].should.equal(1234) + + @mock_redshift def test_create_cluster_from_non_existent_snapshot(): client = boto3.client('redshift', region_name='us-east-1') @@ -1042,3 +1166,98 @@ def test_tagged_resource_not_found_error(): ResourceName='bad:arn' ).should.throw(ClientError, "Tagging is not supported for this type of resource") + +@mock_redshift +def test_enable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + 
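+    # The cluster is created with Encrypted=True so a SnapshotCopyGrantName
+    # can be passed to enable_snapshot_copy below; in AWS, copy grants apply
+    # to copying KMS-encrypted snapshots across regions.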
client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + Encrypted=True, + MasterUsername='user', + MasterUserPassword='password', + NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2' + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(3) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2') + + +@mock_redshift +def test_enable_snapshot_copy_unencrypted(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + MasterUsername='user', + MasterUserPassword='password', + NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(7) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + + +@mock_redshift +def test_disable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.disable_snapshot_copy( + ClusterIdentifier='test', + ) + response = client.describe_clusters(ClusterIdentifier='test') + response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus') + + +@mock_redshift +def test_modify_snapshot_copy_retention_period(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.modify_snapshot_copy_retention_period( + ClusterIdentifier='test', + RetentionPeriod=5, + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(5) diff --git a/tests/test_resourcegroups/__init__.py b/tests/test_resourcegroups/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_resourcegroups/test_resourcegroups.py b/tests/test_resourcegroups/test_resourcegroups.py new file mode 100644 index 000000000..bb3624413 --- /dev/null +++ b/tests/test_resourcegroups/test_resourcegroups.py @@ -0,0 +1,165 @@ +from __future__ import unicode_literals + +import boto3 +import json +import sure # noqa + +from moto import mock_resourcegroups + + +@mock_resourcegroups +def test_create_group(): + resource_groups = 
boto3.client("resource-groups", region_name="us-east-1") + + response = resource_groups.create_group( + Name="test_resource_group", + Description="description", + ResourceQuery={ + "Type": "TAG_FILTERS_1_0", + "Query": json.dumps( + { + "ResourceTypeFilters": ["AWS::AllSupported"], + "TagFilters": [ + {"Key": "resources_tag_key", "Values": ["resources_tag_value"]} + ], + } + ), + }, + Tags={"resource_group_tag_key": "resource_group_tag_value"} + ) + response["Group"]["Name"].should.contain("test_resource_group") + response["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0") + response["Tags"]["resource_group_tag_key"].should.contain("resource_group_tag_value") + + +@mock_resourcegroups +def test_delete_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.delete_group(GroupName="test_resource_group") + response["Group"]["Name"].should.contain("test_resource_group") + + response = resource_groups.list_groups() + response["GroupIdentifiers"].should.have.length_of(0) + response["Groups"].should.have.length_of(0) + + +@mock_resourcegroups +def test_get_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.get_group(GroupName="test_resource_group") + response["Group"]["Description"].should.contain("description") + + return response + + +@mock_resourcegroups +def test_get_group_query(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.get_group_query(GroupName="test_resource_group") + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("TAG_FILTERS_1_0") + + +@mock_resourcegroups +def test_get_tags(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_group() + + response = resource_groups.get_tags(Arn=response["Group"]["GroupArn"]) + response["Tags"].should.have.length_of(1) + response["Tags"]["resource_group_tag_key"].should.contain("resource_group_tag_value") + + return response + + +@mock_resourcegroups +def test_list_groups(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.list_groups() + response["GroupIdentifiers"].should.have.length_of(1) + response["Groups"].should.have.length_of(1) + + +@mock_resourcegroups +def test_tag(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_tags() + + response = resource_groups.tag( + Arn=response["Arn"], + Tags={"resource_group_tag_key_2": "resource_group_tag_value_2"} + ) + response["Tags"]["resource_group_tag_key_2"].should.contain("resource_group_tag_value_2") + + response = resource_groups.get_tags(Arn=response["Arn"]) + response["Tags"].should.have.length_of(2) + response["Tags"]["resource_group_tag_key_2"].should.contain("resource_group_tag_value_2") + + +@mock_resourcegroups +def test_untag(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + response = test_get_tags() + + response = resource_groups.untag(Arn=response["Arn"], Keys=["resource_group_tag_key"]) + response["Keys"].should.contain("resource_group_tag_key") + + response = resource_groups.get_tags(Arn=response["Arn"]) + response["Tags"].should.have.length_of(0) + + +@mock_resourcegroups +def test_update_group(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + 
test_get_group() + + response = resource_groups.update_group( + GroupName="test_resource_group", + Description="description_2", + ) + response["Group"]["Description"].should.contain("description_2") + + response = resource_groups.get_group(GroupName="test_resource_group") + response["Group"]["Description"].should.contain("description_2") + + +@mock_resourcegroups +def test_update_group_query(): + resource_groups = boto3.client("resource-groups", region_name="us-east-1") + + test_create_group() + + response = resource_groups.update_group_query( + GroupName="test_resource_group", + ResourceQuery={ + "Type": "CLOUDFORMATION_STACK_1_0", + "Query": json.dumps( + { + "ResourceTypeFilters": ["AWS::AllSupported"], + "StackIdentifier": ( + "arn:aws:cloudformation:eu-west-1:012345678912:stack/" + "test_stack/c223eca0-e744-11e8-8910-500c41f59083" + ) + } + ), + }, + ) + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("CLOUDFORMATION_STACK_1_0") + + response = resource_groups.get_group_query(GroupName="test_resource_group") + response["GroupQuery"]["ResourceQuery"]["Type"].should.contain("CLOUDFORMATION_STACK_1_0") diff --git a/tests/test_resourcegroupstaggingapi/__init__.py b/tests/test_resourcegroupstaggingapi/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py new file mode 100644 index 000000000..8015472bf --- /dev/null +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -0,0 +1,285 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2, mock_elbv2 + + +@mock_s3 +@mock_resourcegroupstaggingapi +def test_get_resources_s3(): + # Tests pagination + s3_client = boto3.client('s3', region_name='eu-central-1') + + # Will end up having key1,key2,key3,key4 + response_keys = set() + + # Create 4 buckets + for i in range(1, 5): + i_str = str(i) + s3_client.create_bucket(Bucket='test_bucket' + i_str) + s3_client.put_bucket_tagging( + Bucket='test_bucket' + i_str, + Tagging={'TagSet': [{'Key': 'key' + i_str, 'Value': 'value' + i_str}]} + ) + response_keys.add('key' + i_str) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') + resp = rtapi.get_resources(ResourcesPerPage=2) + for resource in resp['ResourceTagMappingList']: + response_keys.remove(resource['Tags'][0]['Key']) + + response_keys.should.have.length_of(2) + + resp = rtapi.get_resources( + ResourcesPerPage=2, + PaginationToken=resp['PaginationToken'] + ) + for resource in resp['ResourceTagMappingList']: + response_keys.remove(resource['Tags'][0]['Key']) + + response_keys.should.have.length_of(0) + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_get_resources_ec2(): + client = boto3.client('ec2', region_name='eu-central-1') + + instances = client.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE1', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE2', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 'Value': 'MY_VALUE3', + }, + ] + }, + ], + ) + instance_id = instances['Instances'][0]['InstanceId'] + image_id = client.create_image(Name='testami', InstanceId=instance_id)['ImageId'] + + client.create_tags( + Resources=[image_id], + 
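+        # Tagging the AMI gives get_resources a second resource type
+        # (ec2:image) to return alongside the tagged instance.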
+        Tags=[{'Key': 'ami', 'Value': 'test'}]
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
+    resp = rtapi.get_resources()
+    # Check we have 1 entry for Instance, 1 entry for AMI
+    resp['ResourceTagMappingList'].should.have.length_of(2)
+
+    # 1 entry for AMI
+    resp = rtapi.get_resources(ResourceTypeFilters=['ec2:image'])
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('image/')
+
+    # As we're iterating the same data, this rules out that the test above was a fluke
+    resp = rtapi.get_resources(ResourceTypeFilters=['ec2:instance'])
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/')
+
+    # Basic test of tag filters
+    resp = rtapi.get_resources(TagFilters=[{'Key': 'MY_TAG1', 'Values': ['MY_VALUE1', 'some_other_value']}])
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/')
+
+
+@mock_ec2
+@mock_resourcegroupstaggingapi
+def test_get_tag_keys_ec2():
+    client = boto3.client('ec2', region_name='eu-central-1')
+
+    client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE1',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE2',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE3',
+                    },
+                ]
+            },
+        ],
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
+    resp = rtapi.get_tag_keys()
+
+    resp['TagKeys'].should.contain('MY_TAG1')
+    resp['TagKeys'].should.contain('MY_TAG2')
+    resp['TagKeys'].should.contain('MY_TAG3')
+
+    # TODO test pagination
+
+
+@mock_ec2
+@mock_resourcegroupstaggingapi
+def test_get_tag_values_ec2():
+    client = boto3.client('ec2', region_name='eu-central-1')
+
+    client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE1',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE2',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE3',
+                    },
+                ]
+            },
+        ],
+    )
+    client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE4',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE5',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE6',
+                    },
+                ]
+            },
+        ],
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
+    resp = rtapi.get_tag_values(Key='MY_TAG1')
+
+    resp['TagValues'].should.contain('MY_VALUE1')
+    resp['TagValues'].should.contain('MY_VALUE4')
+
+@mock_ec2
+@mock_elbv2
+@mock_resourcegroupstaggingapi
+def test_get_resources_elbv2():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.0/26',
+        AvailabilityZone='us-east-1b')
+
+    conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[
+            {
+                'Key': 'key_name',
+                'Value': 'a_value'
+            },
+            {
+                'Key': 'key_2',
+                'Value': 'val2'
+            }
+        ]
+    )
+
+    conn.create_load_balancer(
+        Name='my-other-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='us-east-1')
+
+    resp = rtapi.get_resources(ResourceTypeFilters=['elasticloadbalancer:loadbalancer'])
+
+    resp['ResourceTagMappingList'].should.have.length_of(2)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('loadbalancer/')
+    resp = rtapi.get_resources(
+        ResourceTypeFilters=['elasticloadbalancer:loadbalancer'],
+        TagFilters=[{
+            'Key': 'key_name'
+        }]
+    )
+
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['Tags'].should.contain({'Key': 'key_name', 'Value': 'a_value'})
+
+    # TODO test pagination
diff --git a/tests/test_resourcegroupstaggingapi/test_server.py b/tests/test_resourcegroupstaggingapi/test_server.py
new file mode 100644
index 000000000..311b1f03e
--- /dev/null
+++ b/tests/test_resourcegroupstaggingapi/test_server.py
@@ -0,0 +1,24 @@
+from __future__ import unicode_literals
+
+import sure # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_resourcegroupstaggingapi_list():
+    backend = server.create_backend_app("resourcegroupstaggingapi")
+    test_client = backend.test_client()
+    # do test
+
+    headers = {
+        'X-Amz-Target': 'ResourceGroupsTaggingAPI_20170126.GetResources',
+        'X-Amz-Date': '20171114T234623Z'
+    }
+    resp = test_client.post('/', headers=headers, data='{}')
+
+    assert resp.status_code == 200
+    assert b'ResourceTagMappingList' in resp.data
diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py
index 76217b9d9..de9465d6d 100644
--- a/tests/test_route53/test_route53.py
+++ b/tests/test_route53/test_route53.py
@@ -98,8 +98,19 @@ def test_rrset():
     rrsets.should.have.length_of(1)
     rrsets[0].resource_records[0].should.equal('5.6.7.8')
+    changes = ResourceRecordSets(conn, zoneid)
+    change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "TXT")
+    change.add_value("foo")
+    changes.commit()
+
+    rrsets = conn.get_all_rrsets(zoneid)
+    rrsets.should.have.length_of(2)
+    rrsets[0].resource_records[0].should.equal('5.6.7.8')
+    rrsets[1].resource_records[0].should.equal('foo')
+
     changes = ResourceRecordSets(conn, zoneid)
     changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A")
+    changes.add_change("DELETE", "foo.bar.testdns.aws.com", "TXT")
     changes.commit()
 
     changes = ResourceRecordSets(conn, zoneid)
@@ -113,12 +124,12 @@ def test_rrset():
     rrsets.should.have.length_of(2)
 
     rrsets = conn.get_all_rrsets(
-        zoneid, name="foo.bar.testdns.aws.com", type="A")
+        zoneid, name="bar.foo.testdns.aws.com", type="A")
     rrsets.should.have.length_of(1)
-    rrsets[0].resource_records[0].should.equal('1.2.3.4')
+    rrsets[0].resource_records[0].should.equal('5.6.7.8')
 
     rrsets = conn.get_all_rrsets(
-        zoneid, name="bar.foo.testdns.aws.com", type="A")
+        zoneid, name="foo.bar.testdns.aws.com", type="A")
     rrsets.should.have.length_of(2)
     resource_records = [rr for rr_set in rrsets for rr in rr_set.resource_records]
     resource_records.should.contain('1.2.3.4')
@@ -162,14 +173,16 @@ def test_alias_rrset():
changes.commit() rrsets = conn.get_all_rrsets(zoneid, type="A") - rrset_records = [(rr_set.name, rr) for rr_set in rrsets for rr in rr_set.resource_records] - rrset_records.should.have.length_of(2) - rrset_records.should.contain(('foo.alias.testdns.aws.com', 'foo.testdns.aws.com')) - rrset_records.should.contain(('bar.alias.testdns.aws.com', 'bar.testdns.aws.com')) - rrsets[0].resource_records[0].should.equal('foo.testdns.aws.com') + alias_targets = [rr_set.alias_dns_name for rr_set in rrsets] + alias_targets.should.have.length_of(2) + alias_targets.should.contain('foo.testdns.aws.com') + alias_targets.should.contain('bar.testdns.aws.com') + rrsets[0].alias_dns_name.should.equal('foo.testdns.aws.com') + rrsets[0].resource_records.should.have.length_of(0) rrsets = conn.get_all_rrsets(zoneid, type="CNAME") rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('bar.testdns.aws.com') + rrsets[0].alias_dns_name.should.equal('bar.testdns.aws.com') + rrsets[0].resource_records.should.have.length_of(0) @mock_route53_deprecated @@ -520,12 +533,12 @@ def test_change_resource_record_sets_crud_valid(): # Create A Record. a_record_endpoint_payload = { - 'Comment': 'create A record prod.redis.db', + 'Comment': 'Create A record prod.redis.db', 'Changes': [ { 'Action': 'CREATE', 'ResourceRecordSet': { - 'Name': 'prod.redis.db', + 'Name': 'prod.redis.db.', 'Type': 'A', 'TTL': 10, 'ResourceRecords': [{ @@ -540,20 +553,20 @@ def test_change_resource_record_sets_crud_valid(): response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) len(response['ResourceRecordSets']).should.equal(1) a_record_detail = response['ResourceRecordSets'][0] - a_record_detail['Name'].should.equal('prod.redis.db') + a_record_detail['Name'].should.equal('prod.redis.db.') a_record_detail['Type'].should.equal('A') a_record_detail['TTL'].should.equal(10) a_record_detail['ResourceRecords'].should.equal([{'Value': '127.0.0.1'}]) - # Update type to CNAME + # Update A Record. cname_record_endpoint_payload = { - 'Comment': 'Update to CNAME prod.redis.db', + 'Comment': 'Update A record prod.redis.db', 'Changes': [ { 'Action': 'UPSERT', 'ResourceRecordSet': { - 'Name': 'prod.redis.db', - 'Type': 'CNAME', + 'Name': 'prod.redis.db.', + 'Type': 'A', 'TTL': 60, 'ResourceRecords': [{ 'Value': '192.168.1.1' @@ -567,12 +580,45 @@ def test_change_resource_record_sets_crud_valid(): response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) len(response['ResourceRecordSets']).should.equal(1) cname_record_detail = response['ResourceRecordSets'][0] - cname_record_detail['Name'].should.equal('prod.redis.db') - cname_record_detail['Type'].should.equal('CNAME') + cname_record_detail['Name'].should.equal('prod.redis.db.') + cname_record_detail['Type'].should.equal('A') cname_record_detail['TTL'].should.equal(60) cname_record_detail['ResourceRecords'].should.equal([{'Value': '192.168.1.1'}]) - # Delete record. + # Update to add Alias. 
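+    # An alias record resolves through another DNS name: AliasTarget replaces
+    # ResourceRecords entirely, which the assertions below confirm by checking
+    # that the upserted record no longer carries a ResourceRecords entry.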
+ cname_alias_record_endpoint_payload = { + 'Comment': 'Update to Alias prod.redis.db', + 'Changes': [ + { + 'Action': 'UPSERT', + 'ResourceRecordSet': { + 'Name': 'prod.redis.db.', + 'Type': 'A', + 'TTL': 60, + 'AliasTarget': { + 'HostedZoneId': hosted_zone_id, + 'DNSName': 'prod.redis.alias.', + 'EvaluateTargetHealth': False, + } + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=cname_alias_record_endpoint_payload) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + cname_alias_record_detail = response['ResourceRecordSets'][0] + cname_alias_record_detail['Name'].should.equal('prod.redis.db.') + cname_alias_record_detail['Type'].should.equal('A') + cname_alias_record_detail['TTL'].should.equal(60) + cname_alias_record_detail['AliasTarget'].should.equal({ + 'HostedZoneId': hosted_zone_id, + 'DNSName': 'prod.redis.alias.', + 'EvaluateTargetHealth': False, + }) + cname_alias_record_detail.should_not.contain('ResourceRecords') + + # Delete record with wrong type. delete_payload = { 'Comment': 'delete prod.redis.db', 'Changes': [ @@ -587,8 +633,133 @@ def test_change_resource_record_sets_crud_valid(): } conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=delete_payload) response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(1) + + # Delete record. + delete_payload = { + 'Comment': 'delete prod.redis.db', + 'Changes': [ + { + 'Action': 'DELETE', + 'ResourceRecordSet': { + 'Name': 'prod.redis.db', + 'Type': 'A', + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=delete_payload) + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) len(response['ResourceRecordSets']).should.equal(0) +@mock_route53 +def test_change_weighted_resource_record_sets(): + conn = boto3.client('route53', region_name='us-east-2') + conn.create_hosted_zone( + Name='test.vpc.internal.', + CallerReference=str(hash('test')) + ) + + zones = conn.list_hosted_zones_by_name( + DNSName='test.vpc.internal.' 
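+        # Looking the zone up by name recovers its Id without parsing
+        # the create_hosted_zone response.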
+ ) + + hosted_zone_id = zones['HostedZones'][0]['Id'] + + #Create 2 weighted records + conn.change_resource_record_sets( + HostedZoneId=hosted_zone_id, + ChangeBatch={ + 'Changes': [ + { + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': 'test.vpc.internal', + 'Type': 'A', + 'SetIdentifier': 'test1', + 'Weight': 50, + 'AliasTarget': { + 'HostedZoneId': 'Z3AADJGX6KTTL2', + 'DNSName': 'internal-test1lb-447688172.us-east-2.elb.amazonaws.com.', + 'EvaluateTargetHealth': True + } + } + }, + + { + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': 'test.vpc.internal', + 'Type': 'A', + 'SetIdentifier': 'test2', + 'Weight': 50, + 'AliasTarget': { + 'HostedZoneId': 'Z3AADJGX6KTTL2', + 'DNSName': 'internal-testlb2-1116641781.us-east-2.elb.amazonaws.com.', + 'EvaluateTargetHealth': True + } + } + } + ] + } + ) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + record = response['ResourceRecordSets'][0] + #Update the first record to have a weight of 90 + conn.change_resource_record_sets( + HostedZoneId=hosted_zone_id, + ChangeBatch={ + 'Changes' : [ + { + 'Action' : 'UPSERT', + 'ResourceRecordSet' : { + 'Name' : record['Name'], + 'Type' : record['Type'], + 'SetIdentifier' : record['SetIdentifier'], + 'Weight' : 90, + 'AliasTarget' : { + 'HostedZoneId' : record['AliasTarget']['HostedZoneId'], + 'DNSName' : record['AliasTarget']['DNSName'], + 'EvaluateTargetHealth' : record['AliasTarget']['EvaluateTargetHealth'] + } + } + }, + ] + } + ) + + record = response['ResourceRecordSets'][1] + #Update the second record to have a weight of 10 + conn.change_resource_record_sets( + HostedZoneId=hosted_zone_id, + ChangeBatch={ + 'Changes' : [ + { + 'Action' : 'UPSERT', + 'ResourceRecordSet' : { + 'Name' : record['Name'], + 'Type' : record['Type'], + 'SetIdentifier' : record['SetIdentifier'], + 'Weight' : 10, + 'AliasTarget' : { + 'HostedZoneId' : record['AliasTarget']['HostedZoneId'], + 'DNSName' : record['AliasTarget']['DNSName'], + 'EvaluateTargetHealth' : record['AliasTarget']['EvaluateTargetHealth'] + } + } + }, + ] + } + ) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + for record in response['ResourceRecordSets']: + if record['SetIdentifier'] == 'test1': + record['Weight'].should.equal(90) + if record['SetIdentifier'] == 'test2': + record['Weight'].should.equal(10) + @mock_route53 def test_change_resource_record_invalid(): @@ -688,12 +859,12 @@ def test_list_resource_record_sets_name_type_filters(): # record_type, record_name all_records = [ - ('A', 'a.a.db'), - ('A', 'a.b.db'), - ('A', 'b.b.db'), - ('CNAME', 'b.b.db'), - ('CNAME', 'b.c.db'), - ('CNAME', 'c.c.db') + ('A', 'a.a.db.'), + ('A', 'a.b.db.'), + ('A', 'b.b.db.'), + ('CNAME', 'b.b.db.'), + ('CNAME', 'b.c.db.'), + ('CNAME', 'c.c.db.') ] for record_type, record_name in all_records: create_resource_record_set(record_type, record_name) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 829941d79..b6129c542 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -8,6 +8,7 @@ from functools import wraps from gzip import GzipFile from io import BytesIO import zlib +import pickle import json import boto @@ -50,6 +51,7 @@ def reduced_min_part_size(f): return f(*args, **kwargs) finally: s3model.UPLOAD_PART_MIN_SIZE = orig_size + return wrapped @@ -64,6 +66,50 @@ class MyModel(object): s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value) +@mock_s3 +def test_keys_are_pickleable(): + """Keys must be pickleable due to boto3 implementation details.""" + 
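+    # Round-trip the key through pickle and confirm the payload survives.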
+    key = s3model.FakeKey('name', b'data!')
+    assert key.value == b'data!'
+
+    pickled = pickle.dumps(key)
+    loaded = pickle.loads(pickled)
+    assert loaded.value == key.value
+
+
+@mock_s3
+def test_append_to_value__basic():
+    key = s3model.FakeKey('name', b'data!')
+    assert key.value == b'data!'
+    assert key.size == 5
+
+    key.append_to_value(b' And even more data')
+    assert key.value == b'data! And even more data'
+    assert key.size == 24
+
+
+@mock_s3
+def test_append_to_value__nothing_added():
+    key = s3model.FakeKey('name', b'data!')
+    assert key.value == b'data!'
+    assert key.size == 5
+
+    key.append_to_value(b'')
+    assert key.value == b'data!'
+    assert key.size == 5
+
+
+@mock_s3
+def test_append_to_value__empty_key():
+    key = s3model.FakeKey('name', b'')
+    assert key.value == b''
+    assert key.size == 0
+
+    key.append_to_value(b'stuff')
+    assert key.value == b'stuff'
+    assert key.size == 5
+
+
 @mock_s3
 def test_my_model_save():
     # Create Bucket so that test can run
@@ -224,6 +270,29 @@ def test_multipart_invalid_order():
     bucket.complete_multipart_upload.when.called_with(
         multipart.key_name, multipart.id, xml).should.throw(S3ResponseError)
 
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_etag_quotes_stripped():
+    # Create Bucket so that test can run
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket('mybucket')
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
+    # Strip quotes from etags
+    etag1 = etag1.replace('"','')
+    etag2 = etag2.replace('"','')
+    xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
+    xml = xml.format(1, etag1) + xml.format(2, etag2)
+    xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
+    bucket.complete_multipart_upload.when.called_with(
+        multipart.key_name, multipart.id, xml).should_not.throw(S3ResponseError)
+    # we should get both parts as the key contents
+    bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
 
 @mock_s3_deprecated
 @reduced_min_part_size
@@ -349,6 +418,22 @@ def test_copy_key():
         "new-key").get_contents_as_string().should.equal(b"some value")
 
 
+@mock_s3_deprecated
+def test_copy_key_with_unicode():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-unicode-💩-key"
+    key.set_contents_from_string("some value")
+
+    bucket.copy_key('new-key', 'foobar', 'the-unicode-💩-key')
+
+    bucket.get_key(
+        "the-unicode-💩-key").get_contents_as_string().should.equal(b"some value")
+    bucket.get_key(
+        "new-key").get_contents_as_string().should.equal(b"some value")
+
+
 @mock_s3_deprecated
 def test_copy_key_with_version():
     conn = boto.connect_s3('the_key', 'the_secret')
@@ -359,7 +444,12 @@ def test_copy_key_with_version():
     key.set_contents_from_string("some value")
     key.set_contents_from_string("another value")
 
-    bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id='0')
+    key = [
+        key.version_id
+        for key in bucket.get_all_versions()
+        if not key.is_latest
+    ][0]
+    bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id=key)
 
     bucket.get_key(
         "the-key").get_contents_as_string().should.equal(b"another value")
@@ -733,16 +823,19 @@ def test_key_version():
     bucket = conn.create_bucket('foobar')
     bucket.configure_versioning(versioning=True)
 
+    versions = []
+
     key = Key(bucket)
     key.key = 'the-key'
     key.version_id.should.be.none
     key.set_contents_from_string('some string')
-
key.version_id.should.equal('0') + versions.append(key.version_id) key.set_contents_from_string('some string') - key.version_id.should.equal('1') + versions.append(key.version_id) + set(versions).should.have.length_of(2) key = bucket.get_key('the-key') - key.version_id.should.equal('1') + key.version_id.should.equal(versions[-1]) @mock_s3_deprecated @@ -751,23 +844,25 @@ def test_list_versions(): bucket = conn.create_bucket('foobar') bucket.configure_versioning(versioning=True) + key_versions = [] + key = Key(bucket, 'the-key') key.version_id.should.be.none key.set_contents_from_string("Version 1") - key.version_id.should.equal('0') + key_versions.append(key.version_id) key.set_contents_from_string("Version 2") - key.version_id.should.equal('1') + key_versions.append(key.version_id) + key_versions.should.have.length_of(2) versions = list(bucket.list_versions()) - versions.should.have.length_of(2) versions[0].name.should.equal('the-key') - versions[0].version_id.should.equal('0') + versions[0].version_id.should.equal(key_versions[0]) versions[0].get_contents_as_string().should.equal(b"Version 1") versions[1].name.should.equal('the-key') - versions[1].version_id.should.equal('1') + versions[1].version_id.should.equal(key_versions[1]) versions[1].get_contents_as_string().should.equal(b"Version 2") key = Key(bucket, 'the2-key') @@ -883,11 +978,12 @@ def test_s3_object_in_public_bucket(): s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() exc.exception.response['Error']['Code'].should.equal('403') - params = {'Bucket': 'test-bucket','Key': 'file.txt'} + params = {'Bucket': 'test-bucket', 'Key': 'file.txt'} presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900) response = requests.get(presigned_url) assert response.status_code == 200 + @mock_s3 def test_s3_object_in_private_bucket(): s3 = boto3.resource('s3') @@ -952,6 +1048,15 @@ def test_bucket_location(): bucket.get_location().should.equal("us-west-2") +@mock_s3 +def test_bucket_location_us_east_1(): + cli = boto3.client('s3') + bucket_name = 'mybucket' + # No LocationConstraint ==> us-east-1 + cli.create_bucket(Bucket=bucket_name) + cli.get_bucket_location(Bucket=bucket_name)['LocationConstraint'].should.equal(None) + + @mock_s3_deprecated def test_ranged_get(): conn = boto.connect_s3() @@ -1102,6 +1207,7 @@ def test_boto3_key_etag(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"') + @mock_s3 def test_website_redirect_location(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1116,6 +1222,7 @@ def test_website_redirect_location(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['WebsiteRedirectLocation'].should.equal(url) + @mock_s3 def test_boto3_list_keys_xml_escaped(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1136,6 +1243,30 @@ def test_boto3_list_keys_xml_escaped(): assert 'Owner' not in resp['Contents'][0] +@mock_s3 +def test_boto3_list_objects_v2_common_prefix_pagination(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + + max_keys = 1 + keys = ['test/{i}/{i}'.format(i=i) for i in range(3)] + for key in keys: + s3.put_object(Bucket='mybucket', Key=key, Body=b'v') + + prefixes = [] + args = {"Bucket": 'mybucket', "Delimiter": "/", "Prefix": "test/", "MaxKeys": max_keys} + resp = {"IsTruncated": True} + while resp.get("IsTruncated", False): + if "NextContinuationToken" in resp: + args["ContinuationToken"] = resp["NextContinuationToken"] + resp 
= s3.list_objects_v2(**args) + if "CommonPrefixes" in resp: + assert len(resp["CommonPrefixes"]) == max_keys + prefixes.extend(i["Prefix"] for i in resp["CommonPrefixes"]) + + assert prefixes == [k[:k.rindex('/') + 1] for k in keys] + + @mock_s3 def test_boto3_list_objects_v2_truncated_response(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1273,6 +1404,16 @@ def test_bucket_create_duplicate(): exc.exception.response['Error']['Code'].should.equal('BucketAlreadyExists') +@mock_s3 +def test_bucket_create_force_us_east_1(): + s3 = boto3.resource('s3', region_name='us-east-1') + with assert_raises(ClientError) as exc: + s3.create_bucket(Bucket="blah", CreateBucketConfiguration={ + 'LocationConstraint': 'us-east-1', + }) + exc.exception.response['Error']['Code'].should.equal('InvalidLocationConstraint') + + @mock_s3 def test_boto3_bucket_create_eu_central(): s3 = boto3.resource('s3', region_name='eu-central-1') @@ -1352,16 +1493,22 @@ def test_boto3_head_object_with_versioning(): s3.Object('blah', 'hello.txt').put(Body=old_content) s3.Object('blah', 'hello.txt').put(Body=new_content) + versions = list(s3.Bucket('blah').object_versions.all()) + latest = list(filter(lambda item: item.is_latest, versions))[0] + oldest = list(filter(lambda item: not item.is_latest, versions))[0] + head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( Bucket='blah', Key='hello.txt') - head_object['VersionId'].should.equal('1') + head_object['VersionId'].should.equal(latest.id) head_object['ContentLength'].should.equal(len(new_content)) old_head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( - Bucket='blah', Key='hello.txt', VersionId='0') - old_head_object['VersionId'].should.equal('0') + Bucket='blah', Key='hello.txt', VersionId=oldest.id) + old_head_object['VersionId'].should.equal(oldest.id) old_head_object['ContentLength'].should.equal(len(old_content)) + old_head_object['VersionId'].should_not.equal(head_object['VersionId']) + @mock_s3 def test_boto3_copy_object_with_versioning(): @@ -1376,15 +1523,101 @@ def test_boto3_copy_object_with_versioning(): obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId'] obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId'] - # Versions should be the same - obj1_version.should.equal(obj2_version) - client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2') obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId'] # Version should be different to previous version obj2_version_new.should_not.equal(obj2_version) + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test2', 'VersionId': obj2_version}, Bucket='blah', Key='test3') + obj3_version_new = client.get_object(Bucket='blah', Key='test3')['VersionId'] + obj3_version_new.should_not.equal(obj2_version_new) + + # Copy file that doesn't exist + with assert_raises(ClientError) as e: + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test4', 'VersionId': obj2_version}, Bucket='blah', Key='test5') + e.exception.response['Error']['Code'].should.equal('404') + + response = client.create_multipart_upload(Bucket='blah', Key='test4') + upload_id = response['UploadId'] + response = client.upload_part_copy(Bucket='blah', Key='test4', CopySource={'Bucket': 'blah', 'Key': 'test3', 'VersionId': obj3_version_new}, + UploadId=upload_id, PartNumber=1) + etag = response["CopyPartResult"]["ETag"] + client.complete_multipart_upload( + Bucket='blah', Key='test4', UploadId=upload_id, + 
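+        # The completed part list must echo each part's ETag exactly as
+        # returned by upload_part_copy, or S3 rejects the completion.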
MultipartUpload={'Parts': [{'ETag': etag, 'PartNumber': 1}]}) + + response = client.get_object(Bucket='blah', Key='test4') + data = response["Body"].read() + data.should.equal(b'test2') + + +@mock_s3 +def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='src', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + client.create_bucket(Bucket='dest', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + client.put_bucket_versioning(Bucket='dest', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='src', Key='test', Body=b'content') + + obj2_version_new = client.copy_object(CopySource={'Bucket': 'src', 'Key': 'test'}, Bucket='dest', Key='test') \ + .get('VersionId') + + # VersionId should be present in the response + obj2_version_new.should_not.equal(None) + + +@mock_s3 +def test_boto3_deleted_versionings_list(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah') + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.put_object(Bucket='blah', Key='test2', Body=b'test2') + client.delete_objects(Bucket='blah', Delete={'Objects': [{'Key': 'test1'}]}) + + listed = client.list_objects_v2(Bucket='blah') + assert len(listed['Contents']) == 1 + + +@mock_s3 +def test_boto3_delete_versioned_bucket(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah') + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + resp = client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.delete_object(Bucket='blah', Key='test1', VersionId=resp["VersionId"]) + + client.delete_bucket(Bucket='blah') + +@mock_s3 +def test_boto3_get_object_if_modified_since(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = 'hello.txt' + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + s3.get_object( + Bucket=bucket_name, + Key=key, + IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1) + ) + e = err.exception + e.response['Error'].should.equal({'Code': '304', 'Message': 'Not Modified'}) @mock_s3 def test_boto3_head_object_if_modified_since(): @@ -1438,6 +1671,42 @@ def test_boto3_multipart_etag(): resp['ETag'].should.equal(EXPECTED_ETAG) +@mock_s3 +@reduced_min_part_size +def test_boto3_multipart_part_size(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + + mpu = s3.create_multipart_upload(Bucket='mybucket', Key='the-key') + mpu_id = mpu["UploadId"] + + parts = [] + n_parts = 10 + for i in range(1, n_parts + 1): + part_size = 5 * 1024 * 1024 + body = b'1' * part_size + part = s3.upload_part( + Bucket='mybucket', + Key='the-key', + PartNumber=i, + UploadId=mpu_id, + Body=body, + ContentLength=len(body), + ) + parts.append({"PartNumber": i, "ETag": part["ETag"]}) + + s3.complete_multipart_upload( + Bucket='mybucket', + Key='the-key', + UploadId=mpu_id, + MultipartUpload={"Parts": parts}, + ) + + for i in range(1, n_parts + 1): + obj = s3.head_object(Bucket='mybucket', Key='the-key', PartNumber=i) + assert obj["ContentLength"] == part_size + + @mock_s3 def test_boto3_put_object_with_tagging(): s3 = boto3.client('s3', 
region_name='us-east-1') @@ -1498,6 +1767,24 @@ def test_boto3_put_bucket_tagging(): }) resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + # With duplicate tag keys: + with assert_raises(ClientError) as err: + resp = s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagOne", + "Value": "ValueOneAgain" + } + ] + }) + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidTag") + e.response["Error"]["Message"].should.equal("Cannot provide multiple Tags with the same key") @mock_s3 def test_boto3_get_bucket_tagging(): @@ -1627,7 +1914,7 @@ def test_boto3_put_bucket_cors(): }) e = err.exception e.response["Error"]["Code"].should.equal("InvalidRequest") - e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " + e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " "Unsupported method is NOTREAL") with assert_raises(ClientError) as err: @@ -1732,6 +2019,476 @@ def test_boto3_delete_bucket_cors(): e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") +@mock_s3 +def test_put_bucket_acl_body(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + } + ], + "Owner": bucket_owner + }) + + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 2 + for g in result["Grants"]: + assert g["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/s3/LogDelivery" + assert g["Grantee"]["Type"] == "Group" + assert g["Permission"] in ["WRITE", "READ_ACP"] + + # With one: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ], + "Owner": bucket_owner + }) + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 1 + + # With no owner: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ] + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # With incorrect permission: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "lskjflkasdjflkdsjfalisdjflkdsjf" + } + ], + "Owner": bucket_owner + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # Clear the ACLs: + result = s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={"Grants": [], "Owner": bucket_owner}) + assert not result.get("Grants") + + +@mock_s3 +def test_put_bucket_notification(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + + # With no configuration: + result = s3.get_bucket_notification(Bucket="bucket") + assert not result.get("TopicConfigurations") + assert not 
result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + + # Place proper topic configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "TopicConfigurations": [ + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", + "Events": [ + "s3:ObjectCreated:*", + "s3:ObjectRemoved:*" + ] + }, + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:myothertopic", + "Events": [ + "s3:ObjectCreated:*" + ], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + }, + { + "Name": "suffix", + "Value": "png" + } + ] + } + } + } + ] + }) + + # Verify to completion: + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["TopicConfigurations"]) == 2 + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + assert result["TopicConfigurations"][0]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:mytopic" + assert result["TopicConfigurations"][1]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:myothertopic" + assert len(result["TopicConfigurations"][0]["Events"]) == 2 + assert len(result["TopicConfigurations"][1]["Events"]) == 1 + assert result["TopicConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert result["TopicConfigurations"][0]["Events"][1] == "s3:ObjectRemoved:*" + assert result["TopicConfigurations"][1]["Events"][0] == "s3:ObjectCreated:*" + assert result["TopicConfigurations"][0]["Id"] + assert result["TopicConfigurations"][1]["Id"] + assert not result["TopicConfigurations"][0].get("Filter") + assert len(result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"]) == 2 + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Name"] == "suffix" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Value"] == "png" + + # Place proper queue configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "Id": "SomeID", + "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", + "Events": ["s3:ObjectCreated:*"], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + } + ] + } + } + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["QueueConfigurations"]) == 1 + assert not result.get("TopicConfigurations") + assert not result.get("LambdaFunctionConfigurations") + assert result["QueueConfigurations"][0]["Id"] == "SomeID" + assert result["QueueConfigurations"][0]["QueueArn"] == "arn:aws:sqs:us-east-1:012345678910:myQueue" + assert result["QueueConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert len(result["QueueConfigurations"][0]["Events"]) == 1 + assert len(result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 + assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + + # Place proper Lambda configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "LambdaFunctionConfigurations": [ + { + "LambdaFunctionArn": + "arn:aws:lambda:us-east-1:012345678910:function:lambda", + 
"Events": ["s3:ObjectCreated:*"], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + } + ] + } + } + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["LambdaFunctionConfigurations"]) == 1 + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert result["LambdaFunctionConfigurations"][0]["Id"] + assert result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"] == \ + "arn:aws:lambda:us-east-1:012345678910:function:lambda" + assert result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1 + assert len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 + assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + + # And with all 3 set: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "TopicConfigurations": [ + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", + "Events": [ + "s3:ObjectCreated:*", + "s3:ObjectRemoved:*" + ] + } + ], + "LambdaFunctionConfigurations": [ + { + "LambdaFunctionArn": + "arn:aws:lambda:us-east-1:012345678910:function:lambda", + "Events": ["s3:ObjectCreated:*"] + } + ], + "QueueConfigurations": [ + { + "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["LambdaFunctionConfigurations"]) == 1 + assert len(result["TopicConfigurations"]) == 1 + assert len(result["QueueConfigurations"]) == 1 + + # And clear it out: + s3.put_bucket_notification_configuration(Bucket="bucket", NotificationConfiguration={}) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + + +@mock_s3 +def test_put_bucket_notification_errors(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + + # With incorrect ARNs: + for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "{}Configurations".format(tech): [ + { + "{}Arn".format(tech): + "arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == "The ARN is not well formed" + + # Region not the same as the bucket: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "QueueArn": + "arn:aws:sqs:us-west-2:012345678910:lksajdfkldskfj", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == \ + "The notification destination service region is not valid for the bucket location constraint" + + # Invalid event name: + with assert_raises(ClientError) as err: + 
s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "QueueArn": + "arn:aws:sqs:us-east-1:012345678910:lksajdfkldskfj", + "Events": ["notarealeventname"] + } + ] + }) + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == "The event is not supported for notifications" + + +@mock_s3 +def test_boto3_put_bucket_logging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + log_bucket = "logbucket" + wrong_region_bucket = "wrongregionlogbucket" + s3.create_bucket(Bucket=bucket_name) + s3.create_bucket(Bucket=log_bucket) # Adding the ACL for log-delivery later... + s3.create_bucket(Bucket=wrong_region_bucket, CreateBucketConfiguration={"LocationConstraint": "us-west-2"}) + + # No logging config: + result = s3.get_bucket_logging(Bucket=bucket_name) + assert not result.get("LoggingEnabled") + + # A log-bucket that doesn't exist: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": "IAMNOTREAL", + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + + # A log-bucket that's missing the proper ACLs for LogDelivery: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + assert "log-delivery" in err.exception.response["Error"]["Message"] + + # Add the proper "log-delivery" ACL to the log buckets: + bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"] + for bucket in [log_bucket, wrong_region_bucket]: + s3.put_bucket_acl(Bucket=bucket, AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + }, + { + "Grantee": { + "Type": "CanonicalUser", + "ID": bucket_owner["ID"] + }, + "Permission": "FULL_CONTROL" + } + ], + "Owner": bucket_owner + }) + + # A log-bucket that's in the wrong region: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": wrong_region_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted" + + # Correct logging: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name) + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert result["LoggingEnabled"]["TargetBucket"] == log_bucket + assert result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name) + assert not result["LoggingEnabled"].get("TargetGrants") + + # And disabling: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={}) + assert not s3.get_bucket_logging(Bucket=bucket_name).get("LoggingEnabled") + + # And enabling with multiple target grants: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": 
"SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + }, + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "WRITE" + } + ] + } + }) + + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 2 + assert result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"] == \ + "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274" + + # Test with just 1 grant: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + } + ] + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 1 + + # With an invalid grant: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "NOTAREALPERM" + } + ] + } + }) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + @mock_s3 def test_boto3_put_object_tagging(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1860,6 +2617,104 @@ def test_boto3_list_object_versions(): response['Body'].read().should.equal(items[-1]) +@mock_s3 +def test_boto3_list_object_versions_with_versioning_disabled(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-versions' + s3.create_bucket(Bucket=bucket_name) + items = (six.b('v1'), six.b('v2')) + for body in items: + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=body + ) + response = s3.list_object_versions( + Bucket=bucket_name + ) + + # One object version should be returned + len(response['Versions']).should.equal(1) + response['Versions'][0]['Key'].should.equal(key) + + # The version id should be the string null + response['Versions'][0]['VersionId'].should.equal('null') + + # Test latest object version is returned + response = s3.get_object(Bucket=bucket_name, Key=key) + response['Body'].read().should.equal(items[-1]) + + +@mock_s3 +def test_boto3_list_object_versions_with_versioning_enabled_late(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-versions' + s3.create_bucket(Bucket=bucket_name) + items = (six.b('v1'), six.b('v2')) + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=six.b('v1') + ) + s3.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'Status': 'Enabled' + } + ) + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=six.b('v2') + ) + response = s3.list_object_versions( + Bucket=bucket_name + ) + + # Two object versions should be returned + len(response['Versions']).should.equal(2) + keys = set([item['Key'] for item in response['Versions']]) + keys.should.equal({key}) + + # There should still be a null version id. 
+    version_ids = set([item['VersionId'] for item in response['Versions']])
+    version_ids.should.contain('null')
+
+    # Test latest object version is returned
+    response = s3.get_object(Bucket=bucket_name, Key=key)
+    response['Body'].read().should.equal(items[-1])
+
+@mock_s3
+def test_boto3_bad_prefix_list_object_versions():
+    s3 = boto3.client('s3', region_name='us-east-1')
+    bucket_name = 'mybucket'
+    key = 'key-with-versions'
+    bad_prefix = 'key-that-does-not-exist'
+    s3.create_bucket(Bucket=bucket_name)
+    s3.put_bucket_versioning(
+        Bucket=bucket_name,
+        VersioningConfiguration={
+            'Status': 'Enabled'
+        }
+    )
+    items = (six.b('v1'), six.b('v2'))
+    for body in items:
+        s3.put_object(
+            Bucket=bucket_name,
+            Key=key,
+            Body=body
+        )
+    response = s3.list_object_versions(
+        Bucket=bucket_name,
+        Prefix=bad_prefix,
+    )
+    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
+    response.should_not.contain('Versions')
+    response.should_not.contain('DeleteMarkers')
+
+
 @mock_s3
 def test_boto3_delete_markers():
     s3 = boto3.client('s3', region_name='us-east-1')
@@ -1887,30 +2742,101 @@ def test_boto3_delete_markers():
         Bucket=bucket_name,
         Key=key
     )
-    e.response['Error']['Code'].should.equal('404')
+    e.exception.response['Error']['Code'].should.equal('NoSuchKey')
+
+    response = s3.list_object_versions(
+        Bucket=bucket_name
+    )
+    response['Versions'].should.have.length_of(2)
+    response['DeleteMarkers'].should.have.length_of(1)

     s3.delete_object(
         Bucket=bucket_name,
         Key=key,
-        VersionId='2'
+        VersionId=response['DeleteMarkers'][0]['VersionId']
     )
     response = s3.get_object(
         Bucket=bucket_name,
         Key=key
     )
     response['Body'].read().should.equal(items[-1])
+
     response = s3.list_object_versions(
         Bucket=bucket_name
     )
     response['Versions'].should.have.length_of(2)
+    # We've asserted there are only two records, so one is newest and one is oldest
     latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0]
     oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]
     # Double check ordering of version ID's
+    latest['VersionId'].should_not.equal(oldest['VersionId'])
+
+    # Double check the name is still unicode
+    latest['Key'].should.equal('key-with-versions-and-unicode-ó')
+    oldest['Key'].should.equal('key-with-versions-and-unicode-ó')
+
+
+@mock_s3
+def test_boto3_multiple_delete_markers():
+    s3 = boto3.client('s3', region_name='us-east-1')
+    bucket_name = 'mybucket'
+    key = u'key-with-versions-and-unicode-ó'
+    s3.create_bucket(Bucket=bucket_name)
+    s3.put_bucket_versioning(
+        Bucket=bucket_name,
+        VersioningConfiguration={
+            'Status': 'Enabled'
+        }
+    )
+    items = (six.b('v1'), six.b('v2'))
+    for body in items:
+        s3.put_object(
+            Bucket=bucket_name,
+            Key=key,
+            Body=body
+        )
+
+    # Delete the object twice to add multiple delete markers
+    s3.delete_object(Bucket=bucket_name, Key=key)
+    s3.delete_object(Bucket=bucket_name, Key=key)
+
+    response = s3.list_object_versions(Bucket=bucket_name)
+    response['DeleteMarkers'].should.have.length_of(2)
+
+    with assert_raises(ClientError) as e:
+        s3.get_object(
+            Bucket=bucket_name,
+            Key=key
+        )
+    e.exception.response['Error']['Code'].should.equal('NoSuchKey')
+
+    # Remove both delete markers to restore the object
+    s3.delete_object(
+        Bucket=bucket_name,
+        Key=key,
+        VersionId=response['DeleteMarkers'][0]['VersionId']
+    )
+    s3.delete_object(
+        Bucket=bucket_name,
+        Key=key,
+        VersionId=response['DeleteMarkers'][1]['VersionId']
+    )
+
+    response = s3.get_object(
+        Bucket=bucket_name,
+        Key=key
+    )
+    response['Body'].read().should.equal(items[-1])
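+
+    # With both markers gone, the two real object versions should be all
+    # that's left in the listing:
+    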
response = s3.list_object_versions(Bucket=bucket_name) + response['Versions'].should.have.length_of(2) + # We've asserted there is only 2 records so one is newest, one is oldest latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0] oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0] # Double check ordering of version ID's - latest['VersionId'].should.equal('1') - oldest['VersionId'].should.equal('0') + latest['VersionId'].should_not.equal(oldest['VersionId']) # Double check the name is still unicode latest['Key'].should.equal('key-with-versions-and-unicode-ó') @@ -1939,11 +2865,10 @@ def test_get_stream_gzipped(): Bucket='moto-tests', Key='keyname', ) - res = zlib.decompress(obj['Body'].read(), 16+zlib.MAX_WBITS) + res = zlib.decompress(obj['Body'].read(), 16 + zlib.MAX_WBITS) assert res == payload - TEST_XML = """\ @@ -1962,3 +2887,94 @@ TEST_XML = """\ """ + +@mock_s3 +def test_boto3_bucket_name_too_long(): + s3 = boto3.client('s3', region_name='us-east-1') + with assert_raises(ClientError) as exc: + s3.create_bucket(Bucket='x'*64) + exc.exception.response['Error']['Code'].should.equal('InvalidBucketName') + +@mock_s3 +def test_boto3_bucket_name_too_short(): + s3 = boto3.client('s3', region_name='us-east-1') + with assert_raises(ClientError) as exc: + s3.create_bucket(Bucket='x'*2) + exc.exception.response['Error']['Code'].should.equal('InvalidBucketName') + +@mock_s3 +def test_accelerated_none_when_unspecified(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.shouldnt.have.key('Status') + +@mock_s3 +def test_can_enable_bucket_acceleration(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.should.have.key('Status') + resp['Status'].should.equal('Enabled') + +@mock_s3 +def test_can_suspend_bucket_acceleration(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Suspended'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.should.have.key('Status') + resp['Status'].should.equal('Suspended') + +@mock_s3 +def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + resp = s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Suspended'}, + ) + resp.keys().should.have.length_of(1) # Response contains nothing (only HTTP headers) + resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) + resp.shouldnt.have.key('Status') + +@mock_s3 +def test_accelerate_configuration_status_validation(): + bucket_name = 'some_bucket' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as exc: + 
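# S3 reports an unrecognized accelerate Status as malformed XML rather
+        # than with a dedicated error code, and moto is expected to mirror
+        # that (see the MalformedXML assertion below):
+        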
s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'bad_status'}, + ) + exc.exception.response['Error']['Code'].should.equal('MalformedXML') + +@mock_s3 +def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots(): + bucket_name = 'some.bucket.with.dots' + s3 = boto3.client('s3') + s3.create_bucket(Bucket=bucket_name) + with assert_raises(ClientError) as exc: + s3.put_bucket_accelerate_configuration( + Bucket=bucket_name, + AccelerateConfiguration={'Status': 'Enabled'}, + ) + exc.exception.response['Error']['Code'].should.equal('InvalidRequest') diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py index 5cae8f790..3d533a641 100644 --- a/tests/test_s3/test_s3_lifecycle.py +++ b/tests/test_s3/test_s3_lifecycle.py @@ -1,12 +1,16 @@ from __future__ import unicode_literals import boto +import boto3 from boto.exception import S3ResponseError from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule import sure # noqa +from botocore.exceptions import ClientError +from datetime import datetime +from nose.tools import assert_raises -from moto import mock_s3_deprecated +from moto import mock_s3_deprecated, mock_s3 @mock_s3_deprecated @@ -26,6 +30,288 @@ def test_lifecycle_create(): list(lifecycle.transition).should.equal([]) +@mock_s3 +def test_lifecycle_with_filters(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + # Create a lifecycle rule with a Filter (no tags): + lfc = { + "Rules": [ + { + "Expiration": { + "Days": 7 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == '' + assert not result["Rules"][0]["Filter"].get("And") + assert not result["Rules"][0]["Filter"].get("Tag") + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With a tag: + lfc["Rules"][0]["Filter"]["Tag"] = { + "Key": "mytag", + "Value": "mytagvalue" + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == '' + assert not result["Rules"][0]["Filter"].get("And") + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With And (single tag): + lfc["Rules"][0]["Filter"]["And"] = { + "Prefix": "some/prefix", + "Tags": [ + { + "Key": "mytag", + "Value": "mytagvalue" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == "" + assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" + assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 1 + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with 
assert_raises(KeyError):
+        assert result["Rules"][0]["Prefix"]
+
+    # With multiple And tags:
+    lfc["Rules"][0]["Filter"]["And"] = {
+        "Prefix": "some/prefix",
+        "Tags": [
+            {
+                "Key": "mytag",
+                "Value": "mytagvalue"
+            },
+            {
+                "Key": "mytag2",
+                "Value": "mytagvalue2"
+            }
+        ]
+    }
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["Filter"]["Prefix"] == ""
+    assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix"
+    assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 2
+    assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag"
+    assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue"
+    assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2"
+    assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2"
+    assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag"
+    assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue"
+    with assert_raises(KeyError):
+        assert result["Rules"][0]["Prefix"]
+
+    # Can't have both filter and prefix:
+    lfc["Rules"][0]["Prefix"] = ''
+    with assert_raises(ClientError) as err:
+        client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+
+    lfc["Rules"][0]["Prefix"] = 'some/path'
+    with assert_raises(ClientError) as err:
+        client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+
+    # No filters -- just a prefix:
+    del lfc["Rules"][0]["Filter"]
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert not result["Rules"][0].get("Filter")
+    assert result["Rules"][0]["Prefix"] == "some/path"
+
+
+@mock_s3
+def test_lifecycle_with_eodm():
+    client = boto3.client("s3")
+    client.create_bucket(Bucket="bucket")
+
+    lfc = {
+        "Rules": [
+            {
+                "Expiration": {
+                    "ExpiredObjectDeleteMarker": True
+                },
+                "ID": "wholebucket",
+                "Filter": {
+                    "Prefix": ""
+                },
+                "Status": "Enabled"
+            }
+        ]
+    }
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"]
+
+    # Set to False:
+    lfc["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] = False
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert not result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"]
+
+    # With failure -- Days can't be combined with ExpiredObjectDeleteMarker:
+    lfc["Rules"][0]["Expiration"]["Days"] = 7
+    with assert_raises(ClientError) as err:
+        client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    del lfc["Rules"][0]["Expiration"]["Days"]
+
+    lfc["Rules"][0]["Expiration"]["Date"] = datetime(2015, 1, 1)
+    with assert_raises(ClientError) as err:
+        client.put_bucket_lifecycle_configuration(Bucket="bucket",
LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + +@mock_s3 +def test_lifecycle_with_nve(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "NoncurrentVersionExpiration": { + "NoncurrentDays": 30 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 30 + + # Change NoncurrentDays: + lfc["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] = 10 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 10 + + # TODO: Add test for failures due to missing children + + +@mock_s3 +def test_lifecycle_with_nvt(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "NoncurrentVersionTransitions": [{ + "NoncurrentDays": 30, + "StorageClass": "ONEZONE_IA" + }], + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 30 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "ONEZONE_IA" + + # Change NoncurrentDays: + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 10 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 10 + + # Change StorageClass: + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] = "GLACIER" + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "GLACIER" + + # With failures for missing children: + del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 30 + + del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + +@mock_s3 +def test_lifecycle_with_aimu(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "AbortIncompleteMultipartUpload": { + "DaysAfterInitiation": 7 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": 
"Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 7 + + # Change DaysAfterInitiation: + lfc["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] = 30 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 30 + + # TODO: Add test for failures due to missing children + + @mock_s3_deprecated def test_lifecycle_with_glacier_transition(): conn = boto.s3.connect_to_region("us-west-1") diff --git a/tests/test_s3/test_s3_storageclass.py b/tests/test_s3/test_s3_storageclass.py new file mode 100644 index 000000000..99908c501 --- /dev/null +++ b/tests/test_s3/test_s3_storageclass.py @@ -0,0 +1,106 @@ +from __future__ import unicode_literals + +import boto +import boto3 +from boto.exception import S3CreateError, S3ResponseError +from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule + +import sure # noqa +from botocore.exceptions import ClientError +from datetime import datetime +from nose.tools import assert_raises + +from moto import mock_s3_deprecated, mock_s3 + + +@mock_s3 +def test_s3_storage_class_standard(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + # add an object to the bucket with standard storage + + s3.put_object(Bucket="Bucket", Key="my_key", Body="my_value") + + list_of_objects = s3.list_objects(Bucket="Bucket") + + list_of_objects['Contents'][0]["StorageClass"].should.equal("STANDARD") + + +@mock_s3 +def test_s3_storage_class_infrequent_access(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + # add an object to the bucket with standard storage + + s3.put_object(Bucket="Bucket", Key="my_key_infrequent", Body="my_value_infrequent", StorageClass="STANDARD_IA") + + D = s3.list_objects(Bucket="Bucket") + + D['Contents'][0]["StorageClass"].should.equal("STANDARD_IA") + + +@mock_s3 +def test_s3_storage_class_copy(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD") + + s3.create_bucket(Bucket="Bucket2") + # second object is originally of storage class REDUCED_REDUNDANCY + s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2") + + s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="ONEZONE_IA") + + list_of_copied_objects = s3.list_objects(Bucket="Bucket2") + + # checks that a copied object can be properly copied + list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("ONEZONE_IA") + + +@mock_s3 +def test_s3_invalid_copied_storage_class(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD") + + s3.create_bucket(Bucket="Bucket2") + s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2", StorageClass="REDUCED_REDUNDANCY") + + # Try to copy an object with an invalid storage class + with assert_raises(ClientError) as err: + s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", 
StorageClass="STANDARD2") + + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidStorageClass") + e.response["Error"]["Message"].should.equal("The storage class you specified is not valid") + + +@mock_s3 +def test_s3_invalid_storage_class(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + # Try to add an object with an invalid storage class + with assert_raises(ClientError) as err: + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARDD") + + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidStorageClass") + e.response["Error"]["Message"].should.equal("The storage class you specified is not valid") + +@mock_s3 +def test_s3_default_storage_class(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body") + + list_of_objects = s3.list_objects(Bucket="Bucket") + + # tests that the default storage class is still STANDARD + list_of_objects["Contents"][0]["StorageClass"].should.equal("STANDARD") + + + diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py index b4f56d89a..ce9f54c75 100644 --- a/tests/test_s3/test_s3_utils.py +++ b/tests/test_s3/test_s3_utils.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals +import os from sure import expect -from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore +from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore, parse_region_from_url def test_base_url(): @@ -16,6 +17,12 @@ def test_localhost_without_bucket(): expect(bucket_name_from_url( 'https://www.localhost:5000/def')).should.equal(None) +def test_force_ignore_subdomain_for_bucketnames(): + os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME'] = '1' + expect(bucket_name_from_url('https://subdomain.localhost:5000/abc/resource')).should.equal(None) + del(os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME']) + + def test_versioned_key_store(): d = _VersionedKeyStore() @@ -53,3 +60,21 @@ def test_versioned_key_store(): d.setlist('key', [[1], [2]]) d['key'].should.have.length_of(1) d.getlist('key').should.be.equal([[1], [2]]) + + +def test_parse_region_from_url(): + expected = 'us-west-2' + for url in ['http://s3-us-west-2.amazonaws.com/bucket', + 'http://s3.us-west-2.amazonaws.com/bucket', + 'http://bucket.s3-us-west-2.amazonaws.com', + 'https://s3-us-west-2.amazonaws.com/bucket', + 'https://s3.us-west-2.amazonaws.com/bucket', + 'https://bucket.s3-us-west-2.amazonaws.com']: + parse_region_from_url(url).should.equal(expected) + + expected = 'us-east-1' + for url in ['http://s3.amazonaws.com/bucket', + 'http://bucket.s3.amazonaws.com', + 'https://s3.amazonaws.com/bucket', + 'https://bucket.s3.amazonaws.com']: + parse_region_from_url(url).should.equal(expected) diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py index 9c8252a04..b179a2329 100644 --- a/tests/test_s3/test_server.py +++ b/tests/test_s3/test_server.py @@ -15,6 +15,7 @@ class AuthenticatedClient(FlaskClient): def open(self, *args, **kwargs): kwargs['headers'] = kwargs.get('headers', {}) kwargs['headers']['Authorization'] = "Any authorization header" + kwargs['content_length'] = 0 # Fixes content-length complaints. 
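+        # (Presumably the complaints come from requests carrying no
+        # Content-Length header at all; an explicit zero is harmless here
+        # because these test requests have no body.)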
return super(AuthenticatedClient, self).open(*args, **kwargs) diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_bucket_path_server.py index 434110e87..f6238dd28 100644 --- a/tests/test_s3bucket_path/test_bucket_path_server.py +++ b/tests/test_s3bucket_path/test_bucket_path_server.py @@ -13,6 +13,7 @@ class AuthenticatedClient(FlaskClient): def open(self, *args, **kwargs): kwargs['headers'] = kwargs.get('headers', {}) kwargs['headers']['Authorization'] = "Any authorization header" + kwargs['content_length'] = 0 # Fixes content-length complaints. return super(AuthenticatedClient, self).open(*args, **kwargs) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py new file mode 100644 index 000000000..78b95ee6a --- /dev/null +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -0,0 +1,579 @@ +from __future__ import unicode_literals + +import boto3 + +from moto import mock_secretsmanager +from botocore.exceptions import ClientError +import string +import unittest +import pytz +from datetime import datetime +from nose.tools import assert_raises +from six import b + +DEFAULT_SECRET_NAME = 'test-secret' + + +@mock_secretsmanager +def test_get_secret_value(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + create_secret = conn.create_secret(Name='java-util-test-password', + SecretString="foosecret") + result = conn.get_secret_value(SecretId='java-util-test-password') + assert result['SecretString'] == 'foosecret' + +@mock_secretsmanager +def test_get_secret_value_binary(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + create_secret = conn.create_secret(Name='java-util-test-password', + SecretBinary=b("foosecret")) + result = conn.get_secret_value(SecretId='java-util-test-password') + assert result['SecretBinary'] == b('foosecret') + +@mock_secretsmanager +def test_get_secret_that_does_not_exist(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId='i-dont-exist') + +@mock_secretsmanager +def test_get_secret_that_does_not_match(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + create_secret = conn.create_secret(Name='java-util-test-password', + SecretString="foosecret") + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId='i-dont-match') + + +@mock_secretsmanager +def test_get_secret_value_that_is_marked_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + conn.delete_secret(SecretId='test-secret') + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId='test-secret') + + +@mock_secretsmanager +def test_create_secret(): + conn = boto3.client('secretsmanager', region_name='us-east-1') + + result = conn.create_secret(Name='test-secret', SecretString="foosecret") + assert result['ARN'] + assert result['Name'] == 'test-secret' + secret = conn.get_secret_value(SecretId='test-secret') + assert secret['SecretString'] == 'foosecret' + +@mock_secretsmanager +def test_create_secret_with_tags(): + conn = boto3.client('secretsmanager', region_name='us-east-1') + secret_name = 'test-secret-with-tags' + + result = conn.create_secret( + Name=secret_name, + SecretString="foosecret", + Tags=[{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}] + ) + assert result['ARN'] + assert result['Name'] 
== secret_name + secret_value = conn.get_secret_value(SecretId=secret_name) + assert secret_value['SecretString'] == 'foosecret' + secret_details = conn.describe_secret(SecretId=secret_name) + assert secret_details['Tags'] == [{"Key": "Foo", "Value": "Bar"}, {"Key": "Mykey", "Value": "Myvalue"}] + + +@mock_secretsmanager +def test_delete_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + deleted_secret = conn.delete_secret(SecretId='test-secret') + + assert deleted_secret['ARN'] + assert deleted_secret['Name'] == 'test-secret' + assert deleted_secret['DeletionDate'] > datetime.fromtimestamp(1, pytz.utc) + + secret_details = conn.describe_secret(SecretId='test-secret') + + assert secret_details['ARN'] + assert secret_details['Name'] == 'test-secret' + assert secret_details['DeletedDate'] > datetime.fromtimestamp(1, pytz.utc) + + +@mock_secretsmanager +def test_delete_secret_force(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + result = conn.delete_secret(SecretId='test-secret', ForceDeleteWithoutRecovery=True) + + assert result['ARN'] + assert result['DeletionDate'] > datetime.fromtimestamp(1, pytz.utc) + assert result['Name'] == 'test-secret' + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId='test-secret') + + +@mock_secretsmanager +def test_delete_secret_that_does_not_exist(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='i-dont-exist', ForceDeleteWithoutRecovery=True) + + +@mock_secretsmanager +def test_delete_secret_fails_with_both_force_delete_flag_and_recovery_window_flag(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret', RecoveryWindowInDays=1, ForceDeleteWithoutRecovery=True) + + +@mock_secretsmanager +def test_delete_secret_recovery_window_too_short(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret', RecoveryWindowInDays=6) + + +@mock_secretsmanager +def test_delete_secret_recovery_window_too_long(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret', RecoveryWindowInDays=31) + + +@mock_secretsmanager +def test_delete_secret_that_is_marked_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + deleted_secret = conn.delete_secret(SecretId='test-secret') + + with assert_raises(ClientError): + result = conn.delete_secret(SecretId='test-secret') + + +@mock_secretsmanager +def test_get_random_password_default_length(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + random_password = conn.get_random_password() + assert len(random_password['RandomPassword']) == 32 + +@mock_secretsmanager +def test_get_random_password_default_requirements(): + # When require_each_included_type, default true + conn = boto3.client('secretsmanager', 
region_name='us-west-2')
+
+    random_password = conn.get_random_password()
+    # Should contain lowercase, uppercase, digit, special character
+    assert any(c.islower() for c in random_password['RandomPassword'])
+    assert any(c.isupper() for c in random_password['RandomPassword'])
+    assert any(c.isdigit() for c in random_password['RandomPassword'])
+    assert any(c in string.punctuation
+               for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_password_custom_length():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=50)
+    assert len(random_password['RandomPassword']) == 50
+
+@mock_secretsmanager
+def test_get_random_exclude_lowercase():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=55,
+                                               ExcludeLowercase=True)
+    assert not any(c.islower() for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_exclude_uppercase():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=55,
+                                               ExcludeUppercase=True)
+    assert not any(c.isupper() for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_exclude_characters_and_symbols():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=20,
+                                               ExcludeCharacters='xyzDje@?!.')
+    assert not any(c in 'xyzDje@?!.' for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_exclude_numbers():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=100,
+                                               ExcludeNumbers=True)
+    assert not any(c.isdigit() for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_exclude_punctuation():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=100,
+                                               ExcludePunctuation=True)
+    assert not any(c in string.punctuation
+                   for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_include_space_false():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=300)
+    assert not any(c.isspace() for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_include_space_true():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=4,
+                                               IncludeSpace=True)
+    assert any(c.isspace() for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_require_each_included_type():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=4,
+                                               RequireEachIncludedType=True)
+    assert any(c in string.punctuation for c in random_password['RandomPassword'])
+    assert any(c in string.ascii_lowercase for c in random_password['RandomPassword'])
+    assert any(c in string.ascii_uppercase for c in random_password['RandomPassword'])
+    assert any(c in string.digits for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_too_short_password():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    with
assert_raises(ClientError): + random_password = conn.get_random_password(PasswordLength=3) + +@mock_secretsmanager +def test_get_random_too_long_password(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + with assert_raises(Exception): + random_password = conn.get_random_password(PasswordLength=5555) + +@mock_secretsmanager +def test_describe_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + conn.create_secret(Name='test-secret-2', + SecretString='barsecret') + + secret_description = conn.describe_secret(SecretId='test-secret') + secret_description_2 = conn.describe_secret(SecretId='test-secret-2') + + assert secret_description # Returned dict is not empty + assert secret_description['Name'] == ('test-secret') + assert secret_description['ARN'] != '' # Test arn not empty + assert secret_description_2['Name'] == ('test-secret-2') + assert secret_description_2['ARN'] != '' # Test arn not empty + +@mock_secretsmanager +def test_describe_secret_that_does_not_exist(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId='i-dont-exist') + +@mock_secretsmanager +def test_describe_secret_that_does_not_match(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.get_secret_value(SecretId='i-dont-match') + + +@mock_secretsmanager +def test_list_secrets_empty(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + secrets = conn.list_secrets() + + assert secrets['SecretList'] == [] + + +@mock_secretsmanager +def test_list_secrets(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + conn.create_secret(Name='test-secret-2', + SecretString='barsecret', + Tags=[{ + 'Key': 'a', + 'Value': '1' + }]) + + secrets = conn.list_secrets() + + assert secrets['SecretList'][0]['ARN'] is not None + assert secrets['SecretList'][0]['Name'] == 'test-secret' + assert secrets['SecretList'][1]['ARN'] is not None + assert secrets['SecretList'][1]['Name'] == 'test-secret-2' + assert secrets['SecretList'][1]['Tags'] == [{ + 'Key': 'a', + 'Value': '1' + }] + + +@mock_secretsmanager +def test_restore_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + conn.delete_secret(SecretId='test-secret') + + described_secret_before = conn.describe_secret(SecretId='test-secret') + assert described_secret_before['DeletedDate'] > datetime.fromtimestamp(1, pytz.utc) + + restored_secret = conn.restore_secret(SecretId='test-secret') + assert restored_secret['ARN'] + assert restored_secret['Name'] == 'test-secret' + + described_secret_after = conn.describe_secret(SecretId='test-secret') + assert 'DeletedDate' not in described_secret_after + + +@mock_secretsmanager +def test_restore_secret_that_is_not_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + restored_secret = conn.restore_secret(SecretId='test-secret') + assert restored_secret['ARN'] + assert restored_secret['Name'] == 'test-secret' + + +@mock_secretsmanager +def test_restore_secret_that_does_not_exist(): + conn = boto3.client('secretsmanager', 
region_name='us-west-2') + + with assert_raises(ClientError): + result = conn.restore_secret(SecretId='i-dont-exist') + + +@mock_secretsmanager +def test_rotate_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=DEFAULT_SECRET_NAME, + SecretString='foosecret') + + rotated_secret = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME) + + assert rotated_secret + assert rotated_secret['ARN'] != '' # Test arn not empty + assert rotated_secret['Name'] == DEFAULT_SECRET_NAME + assert rotated_secret['VersionId'] != '' + +@mock_secretsmanager +def test_rotate_secret_enable_rotation(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=DEFAULT_SECRET_NAME, + SecretString='foosecret') + + initial_description = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) + assert initial_description + assert initial_description['RotationEnabled'] is False + assert initial_description['RotationRules']['AutomaticallyAfterDays'] == 0 + + conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, + RotationRules={'AutomaticallyAfterDays': 42}) + + rotated_description = conn.describe_secret(SecretId=DEFAULT_SECRET_NAME) + assert rotated_description + assert rotated_description['RotationEnabled'] is True + assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42 + + +@mock_secretsmanager +def test_rotate_secret_that_is_marked_deleted(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + conn.delete_secret(SecretId='test-secret') + + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId='test-secret') + + +@mock_secretsmanager +def test_rotate_secret_that_does_not_exist(): + conn = boto3.client('secretsmanager', 'us-west-2') + + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId='i-dont-exist') + +@mock_secretsmanager +def test_rotate_secret_that_does_not_match(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name='test-secret', + SecretString='foosecret') + + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId='i-dont-match') + +@mock_secretsmanager +def test_rotate_secret_client_request_token_too_short(): + # Test is intentionally empty. Boto3 catches too short ClientRequestToken + # and raises ParamValidationError before Moto can see it. + # test_server actually handles this error. + assert True + +@mock_secretsmanager +def test_rotate_secret_client_request_token_too_long(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=DEFAULT_SECRET_NAME, + SecretString='foosecret') + + client_request_token = ( + 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-' + 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' + ) + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, + ClientRequestToken=client_request_token) + +@mock_secretsmanager +def test_rotate_secret_rotation_lambda_arn_too_long(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=DEFAULT_SECRET_NAME, + SecretString='foosecret') + + rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, + RotationLambdaARN=rotation_lambda_arn) + +@mock_secretsmanager +def test_rotate_secret_rotation_period_zero(): + # Test is intentionally empty. 
Boto3 catches zero day rotation period + # and raises ParamValidationError before Moto can see it. + # test_server actually handles this error. + assert True + +@mock_secretsmanager +def test_rotate_secret_rotation_period_too_long(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + conn.create_secret(Name=DEFAULT_SECRET_NAME, + SecretString='foosecret') + + rotation_rules = {'AutomaticallyAfterDays': 1001} + with assert_raises(ClientError): + result = conn.rotate_secret(SecretId=DEFAULT_SECRET_NAME, + RotationRules=rotation_rules) + +@mock_secretsmanager +def test_put_secret_value_puts_new_secret(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='foosecret', + VersionStages=['AWSCURRENT']) + version_id = put_secret_value_dict['VersionId'] + + get_secret_value_dict = conn.get_secret_value(SecretId=DEFAULT_SECRET_NAME, + VersionId=version_id, + VersionStage='AWSCURRENT') + + assert get_secret_value_dict + assert get_secret_value_dict['SecretString'] == 'foosecret' + +@mock_secretsmanager +def test_put_secret_value_can_get_first_version_if_put_twice(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='first_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='second_secret', + VersionStages=['AWSCURRENT']) + + first_secret_value_dict = conn.get_secret_value(SecretId=DEFAULT_SECRET_NAME, + VersionId=first_version_id) + first_secret_value = first_secret_value_dict['SecretString'] + + assert first_secret_value == 'first_secret' + + +@mock_secretsmanager +def test_put_secret_value_versions_differ_if_same_secret_put_twice(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + second_version_id = put_secret_value_dict['VersionId'] + + assert first_version_id != second_version_id + + +@mock_secretsmanager +def test_can_list_secret_version_ids(): + conn = boto3.client('secretsmanager', region_name='us-west-2') + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + first_version_id = put_secret_value_dict['VersionId'] + put_secret_value_dict = conn.put_secret_value(SecretId=DEFAULT_SECRET_NAME, + SecretString='dupe_secret', + VersionStages=['AWSCURRENT']) + second_version_id = put_secret_value_dict['VersionId'] + + versions_list = conn.list_secret_version_ids(SecretId=DEFAULT_SECRET_NAME) + + returned_version_ids = [v['VersionId'] for v in versions_list['Versions']] + + assert [first_version_id, second_version_id].sort() == returned_version_ids.sort() + diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py new file mode 100644 index 000000000..23d823239 --- /dev/null +++ b/tests/test_secretsmanager/test_server.py @@ -0,0 +1,607 @@ +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_secretsmanager + +''' 
+Test the different server responses for secretsmanager +''' + +DEFAULT_SECRET_NAME = 'test-secret' + + +@mock_secretsmanager +def test_get_secret_value(): + + backend = server.create_backend_app("secretsmanager") + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": DEFAULT_SECRET_NAME, + "SecretString": "foo-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret"}, + ) + get_secret = test_client.post('/', + data={"SecretId": DEFAULT_SECRET_NAME, + "VersionStage": "AWSCURRENT"}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + + json_data = json.loads(get_secret.data.decode("utf-8")) + + assert json_data['SecretString'] == 'foo-secret' + +@mock_secretsmanager +def test_get_secret_that_does_not_exist(): + + backend = server.create_backend_app("secretsmanager") + test_client = backend.test_client() + + get_secret = test_client.post('/', + data={"SecretId": "i-dont-exist", + "VersionStage": "AWSCURRENT"}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + json_data = json.loads(get_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_get_secret_that_does_not_match(): + backend = server.create_backend_app("secretsmanager") + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": DEFAULT_SECRET_NAME, + "SecretString": "foo-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret"}, + ) + get_secret = test_client.post('/', + data={"SecretId": "i-dont-match", + "VersionStage": "AWSCURRENT"}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + json_data = json.loads(get_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_create_secret(): + + backend = server.create_backend_app("secretsmanager") + test_client = backend.test_client() + + res = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foo-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret"}, + ) + res_2 = test_client.post('/', + data={"Name": "test-secret-2", + "SecretString": "bar-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret"}, + ) + + json_data = json.loads(res.data.decode("utf-8")) + assert json_data['ARN'] != '' + assert json_data['Name'] == 'test-secret' + + json_data_2 = json.loads(res_2.data.decode("utf-8")) + assert json_data_2['ARN'] != '' + assert json_data_2['Name'] == 'test-secret-2' + +@mock_secretsmanager +def test_describe_secret(): + + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + describe_secret = test_client.post('/', + data={"SecretId": "test-secret"}, + headers={ + "X-Amz-Target": "secretsmanager.DescribeSecret" + }, + ) + + create_secret_2 = test_client.post('/', + data={"Name": "test-secret-2", + "SecretString": "barsecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + describe_secret_2 = test_client.post('/', + data={"SecretId": "test-secret-2"}, + headers={ + "X-Amz-Target": "secretsmanager.DescribeSecret" + }, + ) + + json_data = 
json.loads(describe_secret.data.decode("utf-8")) + assert json_data # Returned dict is not empty + assert json_data['ARN'] != '' + assert json_data['Name'] == 'test-secret' + + json_data_2 = json.loads(describe_secret_2.data.decode("utf-8")) + assert json_data_2 # Returned dict is not empty + assert json_data_2['ARN'] != '' + assert json_data_2['Name'] == 'test-secret-2' + +@mock_secretsmanager +def test_describe_secret_that_does_not_exist(): + + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + describe_secret = test_client.post('/', + data={"SecretId": "i-dont-exist"}, + headers={ + "X-Amz-Target": "secretsmanager.DescribeSecret" + }, + ) + + json_data = json.loads(describe_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_describe_secret_that_does_not_match(): + + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": DEFAULT_SECRET_NAME, + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + describe_secret = test_client.post('/', + data={"SecretId": "i-dont-match"}, + headers={ + "X-Amz-Target": "secretsmanager.DescribeSecret" + }, + ) + + json_data = json.loads(describe_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_rotate_secret(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": DEFAULT_SECRET_NAME, + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2" + rotate_secret = test_client.post('/', + data={"SecretId": DEFAULT_SECRET_NAME, + "ClientRequestToken": client_request_token}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data # Returned dict is not empty + assert json_data['ARN'] != '' + assert json_data['Name'] == DEFAULT_SECRET_NAME + assert json_data['VersionId'] == client_request_token + +# @mock_secretsmanager +# def test_rotate_secret_enable_rotation(): +# backend = server.create_backend_app('secretsmanager') +# test_client = backend.test_client() + +# create_secret = test_client.post( +# '/', +# data={ +# "Name": "test-secret", +# "SecretString": "foosecret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.CreateSecret" +# }, +# ) + +# initial_description = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.DescribeSecret" +# }, +# ) + +# json_data = json.loads(initial_description.data.decode("utf-8")) +# assert json_data # Returned dict is not empty +# assert json_data['RotationEnabled'] is False +# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 0 + +# rotate_secret = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret", +# "RotationRules": {"AutomaticallyAfterDays": 42} +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.RotateSecret" +# }, +# ) + +# rotated_description = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret" +# }, +# headers={ +# 
"X-Amz-Target": "secretsmanager.DescribeSecret" +# }, +# ) + +# json_data = json.loads(rotated_description.data.decode("utf-8")) +# assert json_data # Returned dict is not empty +# assert json_data['RotationEnabled'] is True +# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 42 + +@mock_secretsmanager +def test_rotate_secret_that_does_not_exist(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + rotate_secret = test_client.post('/', + data={"SecretId": "i-dont-exist"}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_rotate_secret_that_does_not_match(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": DEFAULT_SECRET_NAME, + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + rotate_secret = test_client.post('/', + data={"SecretId": "i-dont-match"}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_rotate_secret_client_request_token_too_short(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": DEFAULT_SECRET_NAME, + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + client_request_token = "ED9F8B6C-85B7-B7E4-38F2A3BEB13C" + rotate_secret = test_client.post('/', + data={"SecretId": DEFAULT_SECRET_NAME, + "ClientRequestToken": client_request_token}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "ClientRequestToken must be 32-64 characters long." + assert json_data['__type'] == 'InvalidParameterException' + +@mock_secretsmanager +def test_rotate_secret_client_request_token_too_long(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": DEFAULT_SECRET_NAME, + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + client_request_token = ( + 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-' + 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' + ) + rotate_secret = test_client.post('/', + data={"SecretId": DEFAULT_SECRET_NAME, + "ClientRequestToken": client_request_token}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data['message'] == "ClientRequestToken must be 32-64 characters long." 
+ assert json_data['__type'] == 'InvalidParameterException' + +@mock_secretsmanager +def test_rotate_secret_rotation_lambda_arn_too_long(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": DEFAULT_SECRET_NAME, + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters + rotate_secret = test_client.post('/', + data={"SecretId": DEFAULT_SECRET_NAME, + "RotationLambdaARN": rotation_lambda_arn}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + # the expected text mirrors the backend's error string verbatim + assert json_data['message'] == "RotationLambdaARN must <= 2048 characters long." + assert json_data['__type'] == 'InvalidParameterException' + + +@mock_secretsmanager +def test_put_secret_value_puts_new_secret(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "foosecret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + + put_second_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "foosecret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8")) + + version_id = second_secret_json_data['VersionId'] + + secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "VersionId": version_id, + "VersionStage": 'AWSCURRENT'}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + + second_secret_json_data = json.loads(secret_value_json.data.decode("utf-8")) + + assert second_secret_json_data + assert second_secret_json_data['SecretString'] == 'foosecret' + + +@mock_secretsmanager +def test_put_secret_value_can_get_first_version_if_put_twice(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + first_secret_string = 'first_secret' + second_secret_string = 'second_secret' + + put_first_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": first_secret_string, + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + + first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8")) + + first_secret_version_id = first_secret_json_data['VersionId'] + + test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": second_secret_string, + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + + get_first_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "VersionId": first_secret_version_id, + "VersionStage": 'AWSCURRENT'}, + headers={ + "X-Amz-Target": "secretsmanager.GetSecretValue"}, + ) + + get_first_secret_json_data = json.loads(get_first_secret_value_json.data.decode("utf-8")) + + assert get_first_secret_json_data + assert get_first_secret_json_data['SecretString'] == first_secret_string + + +@mock_secretsmanager +def test_put_secret_value_versions_differ_if_same_secret_put_twice(): + backend = server.create_backend_app('secretsmanager') + test_client =
backend.test_client() + + put_first_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "secret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8")) + first_secret_version_id = first_secret_json_data['VersionId'] + + put_second_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "secret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8")) + second_secret_version_id = second_secret_json_data['VersionId'] + + assert first_secret_version_id != second_secret_version_id + + +@mock_secretsmanager +def test_can_list_secret_version_ids(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + put_first_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "secret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + first_secret_json_data = json.loads(put_first_secret_value_json.data.decode("utf-8")) + first_secret_version_id = first_secret_json_data['VersionId'] + put_second_secret_value_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, + "SecretString": "secret", + "VersionStages": ["AWSCURRENT"]}, + headers={ + "X-Amz-Target": "secretsmanager.PutSecretValue"}, + ) + second_secret_json_data = json.loads(put_second_secret_value_json.data.decode("utf-8")) + second_secret_version_id = second_secret_json_data['VersionId'] + + list_secret_versions_json = test_client.post('/', + data={ + "SecretId": DEFAULT_SECRET_NAME, }, + headers={ + "X-Amz-Target": "secretsmanager.ListSecretVersionIds"}, + ) + + versions_list = json.loads(list_secret_versions_json.data.decode("utf-8")) + + returned_version_ids = [v['VersionId'] for v in versions_list['Versions']] + + # list.sort() sorts in place and returns None, so comparing its results would always pass; compare sorted copies instead + assert sorted([first_secret_version_id, second_secret_version_id]) == sorted(returned_version_ids) + +# +# The following tests should work, but fail on the embedded dict in +# RotationRules. The error message suggests a problem deeper in the code, which +# needs further investigation. +# + +# @mock_secretsmanager +# def test_rotate_secret_rotation_period_zero(): +# backend = server.create_backend_app('secretsmanager') +# test_client = backend.test_client() + +# create_secret = test_client.post('/', +# data={"Name": "test-secret", +# "SecretString": "foosecret"}, +# headers={ +# "X-Amz-Target": "secretsmanager.CreateSecret" +# }, +# ) + +# rotate_secret = test_client.post('/', +# data={"SecretId": "test-secret", +# "RotationRules": {"AutomaticallyAfterDays": 0}}, +# headers={ +# "X-Amz-Target": "secretsmanager.RotateSecret" +# }, +# ) + +# json_data = json.loads(rotate_secret.data.decode("utf-8")) +# assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
+# assert json_data['__type'] == 'InvalidParameterException' + +# @mock_secretsmanager +# def test_rotate_secret_rotation_period_too_long(): +# backend = server.create_backend_app('secretsmanager') +# test_client = backend.test_client() + +# create_secret = test_client.post('/', +# data={"Name": "test-secret", +# "SecretString": "foosecret"}, +# headers={ +# "X-Amz-Target": "secretsmanager.CreateSecret" +# }, +# ) + +# rotate_secret = test_client.post('/', +# data={"SecretId": "test-secret", +# "RotationRules": {"AutomaticallyAfterDays": 1001}}, +# headers={ +# "X-Amz-Target": "secretsmanager.RotateSecret" +# }, +# ) + +# json_data = json.loads(rotate_secret.data.decode("utf-8")) +# assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000." +# assert json_data['__type'] == 'InvalidParameterException' diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py index 5d39f61d4..e800b8035 100644 --- a/tests/test_ses/test_ses_boto3.py +++ b/tests/test_ses/test_ses_boto3.py @@ -136,3 +136,59 @@ def test_send_raw_email(): send_quota = conn.get_send_quota() sent_count = int(send_quota['SentLast24Hours']) sent_count.should.equal(2) + + +@mock_ses +def test_send_raw_email_without_source(): + conn = boto3.client('ses', region_name='us-east-1') + + message = MIMEMultipart() + message['Subject'] = 'Test' + message['From'] = 'test@example.com' + message['To'] = 'to@example.com, foo@example.com' + + # Message body + part = MIMEText('test file attached') + message.attach(part) + + # Attachment + part = MIMEText('contents of test file here') + part.add_header('Content-Disposition', 'attachment; filename=test.txt') + message.attach(part) + + kwargs = dict( + RawMessage={'Data': message.as_string()}, + ) + + conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) + + conn.verify_email_identity(EmailAddress="test@example.com") + conn.send_raw_email(**kwargs) + + send_quota = conn.get_send_quota() + sent_count = int(send_quota['SentLast24Hours']) + sent_count.should.equal(2) + + +@mock_ses +def test_send_raw_email_without_source_or_from(): + conn = boto3.client('ses', region_name='us-east-1') + + message = MIMEMultipart() + message['Subject'] = 'Test' + message['To'] = 'to@example.com, foo@example.com' + + # Message body + part = MIMEText('test file attached') + message.attach(part) + # Attachment + part = MIMEText('contents of test file here') + part.add_header('Content-Disposition', 'attachment; filename=test.txt') + message.attach(part) + + kwargs = dict( + RawMessage={'Data': message.as_string()}, + ) + + conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError) + diff --git a/tests/test_ses/test_ses_sns_boto3.py b/tests/test_ses/test_ses_sns_boto3.py new file mode 100644 index 000000000..37f79a8b0 --- /dev/null +++ b/tests/test_ses/test_ses_sns_boto3.py @@ -0,0 +1,114 @@ +from __future__ import unicode_literals + +import boto3 +import json +from botocore.exceptions import ClientError +from six.moves.email_mime_multipart import MIMEMultipart +from six.moves.email_mime_text import MIMEText + +import sure # noqa +from nose import tools +from moto import mock_ses, mock_sns, mock_sqs +from moto.ses.models import SESFeedback + + +@mock_ses +def test_enable_disable_ses_sns_communication(): + conn = boto3.client('ses', region_name='us-east-1') + conn.set_identity_notification_topic( + Identity='test.com', + NotificationType='Bounce', + SnsTopic='the-arn' + ) + conn.set_identity_notification_topic( + Identity='test.com', + 
NotificationType='Bounce' + ) + + +def __setup_feedback_env__(ses_conn, sns_conn, sqs_conn, domain, topic, queue, region, expected_msg): + """Setup the AWS environment to test the SES SNS Feedback""" + # Environment setup + # Create SQS queue + sqs_conn.create_queue(QueueName=queue) + # Create SNS topic + create_topic_response = sns_conn.create_topic(Name=topic) + topic_arn = create_topic_response["TopicArn"] + # Subscribe the SNS topic to the SQS queue + sns_conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:%s:123456789012:%s" % (region, queue)) + # Verify SES domain + ses_conn.verify_domain_identity(Domain=domain) + # Setup SES notification topic + if expected_msg is not None: + ses_conn.set_identity_notification_topic( + Identity=domain, + NotificationType=expected_msg, + SnsTopic=topic_arn + ) + + +def __test_sns_feedback__(addr, expected_msg): + region_name = "us-east-1" + ses_conn = boto3.client('ses', region_name=region_name) + sns_conn = boto3.client('sns', region_name=region_name) + sqs_conn = boto3.resource('sqs', region_name=region_name) + domain = "example.com" + topic = "bounce-arn-feedback" + queue = "feedback-test-queue" + + __setup_feedback_env__(ses_conn, sns_conn, sqs_conn, domain, topic, queue, region_name, expected_msg) + + # Send the message + kwargs = dict( + Source="test@" + domain, + Destination={ + "ToAddresses": [addr + "@" + domain], + "CcAddresses": ["test_cc@" + domain], + "BccAddresses": ["test_bcc@" + domain], + }, + Message={ + "Subject": {"Data": "test subject"}, + "Body": {"Text": {"Data": "test body"}} + } + ) + ses_conn.send_email(**kwargs) + + # Wait for messages in the queues + queue = sqs_conn.get_queue_by_name(QueueName=queue) + messages = queue.receive_messages(MaxNumberOfMessages=1) + if expected_msg is not None: + msg = messages[0].body + msg = json.loads(msg) + assert msg["Message"] == SESFeedback.generate_message(expected_msg) + else: + assert len(messages) == 0 + + +@mock_sqs +@mock_sns +@mock_ses +def test_no_sns_feedback(): + __test_sns_feedback__("test", None) + + +@mock_sqs +@mock_sns +@mock_ses +def test_sns_feedback_bounce(): + __test_sns_feedback__(SESFeedback.BOUNCE_ADDR, SESFeedback.BOUNCE) + + +@mock_sqs +@mock_sns +@mock_ses +def test_sns_feedback_complaint(): + __test_sns_feedback__(SESFeedback.COMPLAINT_ADDR, SESFeedback.COMPLAINT) + + +@mock_sqs +@mock_sns +@mock_ses +def test_sns_feedback_delivery(): + __test_sns_feedback__(SESFeedback.SUCCESS_ADDR, SESFeedback.DELIVERY) diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index b626e2fac..964296837 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -1,16 +1,15 @@ from __future__ import unicode_literals -from six.moves.urllib.parse import parse_qs import boto +import json import re from freezegun import freeze_time import sure # noqa -from moto.packages.responses import responses from moto import mock_sns_deprecated, mock_sqs_deprecated -MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": 
"Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' +MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "%s",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' @mock_sqs_deprecated @@ -29,13 +28,14 @@ def test_publish_to_sqs(): "arn:aws:sqs:us-east-1:123456789012:test-queue") message_to_publish = 'my message' + subject_to_publish = "test subject" with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(topic=topic_arn, message=message_to_publish) + published_message = conn.publish(topic=topic_arn, message=message_to_publish, subject=subject_to_publish) published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] queue = sqs_conn.get_queue("test-queue") message = queue.read(1) - expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, 'us-east-1') + expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, subject_to_publish, 'us-east-1') acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) acquired_message.should.equal(expected) @@ -56,13 +56,14 @@ def test_publish_to_sqs_in_different_region(): "arn:aws:sqs:us-west-2:123456789012:test-queue") message_to_publish = 'my message' + subject_to_publish = "test subject" with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(topic=topic_arn, message=message_to_publish) + published_message = conn.publish(topic=topic_arn, message=message_to_publish, subject=subject_to_publish) published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] queue = sqs_conn.get_queue("test-queue") message = queue.read(1) - expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, 'us-west-1') + expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, subject_to_publish, 'us-west-1') acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) acquired_message.should.equal(expected) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 1540ceb84..3d598d406 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -1,18 +1,17 @@ from __future__ import unicode_literals +import base64 import json -from six.moves.urllib.parse import parse_qs - import boto3 import re from freezegun import freeze_time import sure # noqa -from moto.packages.responses import responses +import responses from botocore.exceptions import ClientError +from nose.tools import assert_raises from moto import mock_sns, mock_sqs -from freezegun import freeze_time MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n 
"Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' @@ -44,6 +43,106 @@ def test_publish_to_sqs(): acquired_message.should.equal(expected) +@mock_sqs +@mock_sns +def test_publish_to_sqs_raw(): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + subscription = topic.subscribe( + Protocol='sqs', Endpoint=queue.attributes['QueueArn']) + + subscription.set_attributes( + AttributeName='RawMessageDelivery', AttributeValue='true') + + message = 'my message' + with freeze_time("2015-01-01 12:00:00"): + topic.publish(Message=message) + + messages = queue.receive_messages(MaxNumberOfMessages=1) + messages[0].body.should.equal(message) + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_bad(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + try: + # Test missing Value + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': {'DataType': 'String'}}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + try: + # Test empty DataType (if the DataType field is missing entirely + # botocore throws an exception during validation) + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': '', + 'StringValue': 'example_corp' + }}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + try: + # Test empty Value + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': 'String', + 'StringValue': '' + }}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_msg_attr_byte_value(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + queue = sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': 'Binary', + 'BinaryValue': b'\x02\x03\x04' + }}) + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for 
m in messages] + message_attributes.should.equal([{ + 'store': { + 'Type': 'Binary', + 'Value': base64.b64encode(b'\x02\x03\x04').decode() + } + }]) + + @mock_sns def test_publish_sms(): client = boto3.client('sns', region_name='us-east-1') @@ -155,8 +254,10 @@ def test_publish_to_sqs_in_different_region(): @mock_sns def test_publish_to_http(): def callback(request): - request.headers["Content-Type"].should.equal("application/json") - json.loads.when.called_with(request.body).should_not.throw(Exception) + request.headers["Content-Type"].should.equal("text/plain; charset=UTF-8") + json.loads.when.called_with( + request.body.decode() + ).should_not.throw(Exception) return 200, {}, "" responses.add_callback( @@ -176,7 +277,6 @@ def test_publish_to_http(): response = conn.publish( TopicArn=topic_arn, Message="my message", Subject="my subject") - message_id = response['MessageId'] @mock_sqs @@ -207,3 +307,183 @@ def test_publish_subject(): err.response['Error']['Code'].should.equal('InvalidParameter') else: raise RuntimeError('Should have raised an InvalidParameter exception') + + +@mock_sns +def test_publish_message_too_long(): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + with assert_raises(ClientError): + topic.publish( + Message="".join(["." for i in range(0, 262145)])) + + # message short enough - does not raise an error + topic.publish( + Message="".join(["." for i in range(0, 262144)])) + + +def _setup_filter_policy_test(filter_policy): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + subscription = topic.subscribe( + Protocol='sqs', Endpoint=queue.attributes['QueueArn']) + + subscription.set_attributes( + AttributeName='FilterPolicy', AttributeValue=json.dumps(filter_policy)) + + return topic, subscription, queue + + +@mock_sqs +@mock_sns +def test_filtering_exact_string(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal(['match']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal( + [{'store': {'Type': 'String', 'Value': 'example_corp'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_multiple_message_attributes(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_cancelled'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal(['match']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': {'Type': 'String', 'Value': 'example_corp'}, + 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_OR_matching(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp', 
'different_corp']}) + + topic.publish( + Message='match example_corp', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}}) + topic.publish( + Message='match different_corp', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'different_corp'}}) + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal( + ['match example_corp', 'match different_corp']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([ + {'store': {'Type': 'String', 'Value': 'example_corp'}}, + {'store': {'Type': 'String', 'Value': 'different_corp'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_AND_matching_positive(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp'], + 'event': ['order_cancelled']}) + + topic.publish( + Message='match example_corp order_cancelled', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_cancelled'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal( + ['match example_corp order_cancelled']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': {'Type': 'String', 'Value': 'example_corp'}, + 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_AND_matching_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp'], + 'event': ['order_cancelled']}) + + topic.publish( + Message='match example_corp order_accepted', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_accepted'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='no match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'different_corp'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_no_attributes_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish(Message='no match') + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index 4446febfc..2a56c8213 100644 
--- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -25,6 +25,23 @@ def test_subscribe_sms(): ) resp.should.contain('SubscriptionArn') +@mock_sns +def test_double_subscription(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + do_subscribe_sqs = lambda sqs_arn: client.subscribe( + TopicArn=arn, + Protocol='sqs', + Endpoint=sqs_arn + ) + resp1 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') + resp2 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') + + resp1['SubscriptionArn'].should.equal(resp2['SubscriptionArn']) + @mock_sns def test_subscribe_bad_sms(): @@ -165,6 +182,72 @@ def test_subscription_paging(): topic1_subscriptions.shouldnt.have("NextToken") +@mock_sns +def test_creating_subscription_with_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + delivery_policy = json.dumps({ + 'healthyRetryPolicy': { + "numRetries": 10, + "minDelayTarget": 1, + "maxDelayTarget":2 + } + }) + + filter_policy = json.dumps({ + "store": ["example_corp"], + "event": ["order_cancelled"], + "encrypted": [False], + "customer_interests": ["basketball", "baseball"] + }) + + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/", + Attributes={ + 'RawMessageDelivery': 'true', + 'DeliveryPolicy': delivery_policy, + 'FilterPolicy': filter_policy + }) + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Test the subscription attributes have been set + subscription_arn = subscription["SubscriptionArn"] + attrs = conn.get_subscription_attributes( + SubscriptionArn=subscription_arn + ) + + attrs['Attributes']['RawMessageDelivery'].should.equal('true') + attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) + attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) + + # Now unsubscribe the subscription + conn.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"]) + + # And there should be zero subscriptions left + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(0) + + # invalid attr name + with assert_raises(ClientError): + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/", + Attributes={ + 'InvalidName': 'true' + }) + + @mock_sns def test_set_subscription_attributes(): conn = boto3.client('sns', region_name='us-east-1') @@ -206,11 +289,26 @@ def test_set_subscription_attributes(): AttributeName='DeliveryPolicy', AttributeValue=delivery_policy ) + + filter_policy = json.dumps({ + "store": ["example_corp"], + "event": ["order_cancelled"], + "encrypted": [False], + "customer_interests": ["basketball", "baseball"] + }) + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='FilterPolicy', + AttributeValue=filter_policy + ) + attrs = conn.get_subscription_attributes( SubscriptionArn=subscription_arn ) + attrs['Attributes']['RawMessageDelivery'].should.equal('true') 
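+ # the stored attribute values should round-trip exactly as the JSON strings that were set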
attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) + attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) # not existing subscription with assert_raises(ClientError): diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index 95dd41f89..870fa6f6e 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -13,23 +13,36 @@ from moto.sns.models import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POL @mock_sns def test_create_and_delete_topic(): conn = boto3.client("sns", region_name="us-east-1") - conn.create_topic(Name="some-topic") + for topic_name in ('some-topic', '-some-topic-', '_some-topic_', 'a' * 256): + conn.create_topic(Name=topic_name) + topics_json = conn.list_topics() + topics = topics_json["Topics"] + topics.should.have.length_of(1) + topics[0]['TopicArn'].should.equal( + "arn:aws:sns:{0}:123456789012:{1}" + .format(conn._client_config.region_name, topic_name) + ) + + # Delete the topic + conn.delete_topic(TopicArn=topics[0]['TopicArn']) + + # And there should now be 0 topics + topics_json = conn.list_topics() + topics = topics_json["Topics"] + topics.should.have.length_of(0) + + +@mock_sns +def test_create_topic_with_attributes(): + conn = boto3.client("sns", region_name="us-east-1") + conn.create_topic(Name='some-topic-with-attribute', Attributes={'DisplayName': 'test-topic'}) topics_json = conn.list_topics() - topics = topics_json["Topics"] - topics.should.have.length_of(1) - topics[0]['TopicArn'].should.equal( - "arn:aws:sns:{0}:123456789012:some-topic" - .format(conn._client_config.region_name) - ) + topic_arn = topics_json["Topics"][0]['TopicArn'] - # Delete the topic - conn.delete_topic(TopicArn=topics[0]['TopicArn']) + attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes'] + attributes['DisplayName'].should.equal('test-topic') - # And there should now be 0 topics - topics_json = conn.list_topics() - topics = topics_json["Topics"] - topics.should.have.length_of(0) @mock_sns def test_create_topic_should_be_indempodent(): diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index c761ec8d9..d53ae50f7 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -40,6 +40,56 @@ def test_create_fifo_queue_fail(): raise RuntimeError('Should of raised InvalidParameterValue Exception') +@mock_sqs +def test_create_queue_with_same_attributes(): + sqs = boto3.client('sqs', region_name='us-east-1') + + dlq_url = sqs.create_queue(QueueName='test-queue-dlq')['QueueUrl'] + dlq_arn = sqs.get_queue_attributes(QueueUrl=dlq_url)['Attributes']['QueueArn'] + + attributes = { + 'DelaySeconds': '900', + 'MaximumMessageSize': '262144', + 'MessageRetentionPeriod': '1209600', + 'ReceiveMessageWaitTimeSeconds': '20', + 'RedrivePolicy': '{"deadLetterTargetArn": "%s", "maxReceiveCount": 100}' % (dlq_arn), + 'VisibilityTimeout': '43200' + } + + sqs.create_queue( + QueueName='test-queue', + Attributes=attributes + ) + + sqs.create_queue( + QueueName='test-queue', + Attributes=attributes + ) + + +@mock_sqs +def test_create_queue_with_different_attributes_fail(): + sqs = boto3.client('sqs', region_name='us-east-1') + + sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'VisibilityTimeout': '10', + } + ) + try: + sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'VisibilityTimeout': '60', + } + ) + except botocore.exceptions.ClientError as err: + err.response['Error']['Code'].should.equal('QueueAlreadyExists') + else: + raise 
RuntimeError('Should have raised QueueAlreadyExists Exception') + + @mock_sqs def test_create_fifo_queue(): sqs = boto3.client('sqs', region_name='us-east-1') @@ -72,6 +122,24 @@ def test_create_queue(): queue.attributes.get('VisibilityTimeout').should.equal('30') + +@mock_sqs +def test_create_queue_kms(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + new_queue = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'KmsMasterKeyId': 'master-key-id', + 'KmsDataKeyReusePeriodSeconds': '600' + }) + new_queue.should_not.be.none + + queue = sqs.get_queue_by_name(QueueName='test-queue') + + queue.attributes.get('KmsMasterKeyId').should.equal('master-key-id') + queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600') + + @mock_sqs def test_get_nonexistent_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') @@ -79,13 +147,15 @@ sqs.get_queue_by_name(QueueName='nonexisting-queue') ex = err.exception ex.operation_name.should.equal('GetQueueUrl') - ex.response['Error']['Code'].should.equal('QueueDoesNotExist') + ex.response['Error']['Code'].should.equal( + 'AWS.SimpleQueueService.NonExistentQueue') with assert_raises(ClientError) as err: sqs.Queue('http://whatever-incorrect-queue-address').load() ex = err.exception ex.operation_name.should.equal('GetQueueAttributes') - ex.response['Error']['Code'].should.equal('QueueDoesNotExist') + ex.response['Error']['Code'].should.equal( + 'AWS.SimpleQueueService.NonExistentQueue') @@ -150,6 +220,28 @@ def test_message_with_complex_attributes(): messages.should.have.length_of(1) + +@mock_sqs +def test_send_message_with_message_group_id(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="test-group-id.fifo", + Attributes={'FifoQueue': 'true'}) + + sent = queue.send_message( + MessageBody="mydata", + MessageDeduplicationId="dedupe_id_1", + MessageGroupId="group_id_1", + ) + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + message_attributes = messages[0].attributes + message_attributes.should.contain('MessageGroupId') + message_attributes['MessageGroupId'].should.equal('group_id_1') + message_attributes.should.contain('MessageDeduplicationId') + message_attributes['MessageDeduplicationId'].should.equal('dedupe_id_1') + + @mock_sqs def test_send_message_with_unicode_characters(): body_one = 'Héllo!😀' @@ -324,7 +416,9 @@ def test_send_receive_message_timestamps(): conn.create_queue(QueueName="test-queue") queue = sqs.Queue("test-queue") - queue.send_message(MessageBody="derp") + response = queue.send_message(MessageBody="derp") + assert response['ResponseMetadata']['RequestId'] + messages = conn.receive_message( QueueUrl=queue.url, MaxNumberOfMessages=1)['Messages'] @@ -336,6 +430,36 @@ int.when.called_with(approximate_first_receive_timestamp).shouldnt.throw(ValueError) + +@mock_sqs +def test_max_number_of_messages_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=11) + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=0) + + # no error but also no messages returned + queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=0) + + +@mock_sqs +def test_wait_time_seconds_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue =
sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(WaitTimeSeconds=-1) + + with assert_raises(ClientError): + queue.receive_messages(WaitTimeSeconds=21) + + # no error but also no messages returned + queue.receive_messages(WaitTimeSeconds=0) + + @mock_sqs def test_receive_messages_with_wait_seconds_timeout_of_zero(): """ @@ -351,20 +475,6 @@ def test_receive_messages_with_wait_seconds_timeout_of_zero(): messages.should.equal([]) -@mock_sqs -def test_receive_messages_with_wait_seconds_timeout_of_negative_one(): - """ - test that zero messages is returned with a wait_seconds_timeout of negative 1 - :return: - """ - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - - messages = queue.receive_messages(WaitTimeSeconds=-1) - messages.should.equal([]) - - @mock_sqs_deprecated def test_send_message_with_xml_characters(): conn = boto.connect_sqs('the_key', 'the_secret') @@ -890,7 +1000,7 @@ def test_create_fifo_queue_with_dlq(): def test_queue_with_dlq(): if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': raise SkipTest('Cant manipulate time in server mode') - + sqs = boto3.client('sqs', region_name='us-east-1') with freeze_time("2015-01-01 12:00:00"): @@ -932,3 +1042,198 @@ def test_queue_with_dlq(): resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1) resp['queueUrls'][0].should.equal(queue_url2) + + +@mock_sqs +def test_redrive_policy_available(): + sqs = boto3.client('sqs', region_name='us-east-1') + + resp = sqs.create_queue(QueueName='test-deadletter') + queue_url1 = resp['QueueUrl'] + queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] + redrive_policy = { + 'deadLetterTargetArn': queue_arn1, + 'maxReceiveCount': 1, + } + + resp = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'RedrivePolicy': json.dumps(redrive_policy) + } + ) + + queue_url2 = resp['QueueUrl'] + attributes = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes'] + assert 'RedrivePolicy' in attributes + assert json.loads(attributes['RedrivePolicy']) == redrive_policy + + # Cant have redrive policy without maxReceiveCount + with assert_raises(ClientError): + sqs.create_queue( + QueueName='test-queue2', + Attributes={ + 'FifoQueue': 'true', + 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1}) + } + ) + + +@mock_sqs +def test_redrive_policy_non_existent_queue(): + sqs = boto3.client('sqs', region_name='us-east-1') + redrive_policy = { + 'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:no-queue', + 'maxReceiveCount': 1, + } + + with assert_raises(ClientError): + sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'RedrivePolicy': json.dumps(redrive_policy) + } + ) + + +@mock_sqs +def test_redrive_policy_set_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + queue = sqs.create_queue(QueueName='test-queue') + deadletter_queue = sqs.create_queue(QueueName='test-deadletter') + + redrive_policy = { + 'deadLetterTargetArn': deadletter_queue.attributes['QueueArn'], + 'maxReceiveCount': 1, + } + + queue.set_attributes(Attributes={ + 'RedrivePolicy': json.dumps(redrive_policy)}) + + copy = sqs.get_queue_by_name(QueueName='test-queue') + assert 'RedrivePolicy' in copy.attributes + copy_policy = json.loads(copy.attributes['RedrivePolicy']) + assert copy_policy == redrive_policy + + +@mock_sqs +def test_receive_messages_with_message_group_id(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = 
sqs.create_queue(QueueName="test-queue.fifo", + Attributes={ + 'FifoQueue': 'true', + }) + queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) + queue.send_message( + MessageBody="message-1", + MessageGroupId="group" + ) + queue.send_message( + MessageBody="message-2", + MessageGroupId="group" + ) + + messages = queue.receive_messages() + messages.should.have.length_of(1) + message = messages[0] + + # received message is not deleted! + + messages = queue.receive_messages(WaitTimeSeconds=0) + messages.should.have.length_of(0) + + # message is now processed, next one should be available + message.delete() + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_receive_messages_with_message_group_id_on_requeue(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="test-queue.fifo", + Attributes={ + 'FifoQueue': 'true', + }) + queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) + queue.send_message( + MessageBody="message-1", + MessageGroupId="group" + ) + queue.send_message( + MessageBody="message-2", + MessageGroupId="group" + ) + + messages = queue.receive_messages() + messages.should.have.length_of(1) + message = messages[0] + + # received message is not deleted! + + messages = queue.receive_messages(WaitTimeSeconds=0) + messages.should.have.length_of(0) + + # message is now available again, next one should be available + message.change_visibility(VisibilityTimeout=0) + messages = queue.receive_messages() + messages.should.have.length_of(1) + messages[0].message_id.should.equal(message.message_id) + + +@mock_sqs +def test_receive_messages_with_message_group_id_on_visibility_timeout(): + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': + raise SkipTest('Cant manipulate time in server mode') + + with freeze_time("2015-01-01 12:00:00"): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="test-queue.fifo", + Attributes={ + 'FifoQueue': 'true', + }) + queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) + queue.send_message( + MessageBody="message-1", + MessageGroupId="group" + ) + queue.send_message( + MessageBody="message-2", + MessageGroupId="group" + ) + + messages = queue.receive_messages() + messages.should.have.length_of(1) + message = messages[0] + + # received message is not deleted! 
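+ # while the first message of the group is in flight, further receives return nothing until its visibility timeout lapses or it is deleted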
+ + messages = queue.receive_messages(WaitTimeSeconds=0) + messages.should.have.length_of(0) + + message.change_visibility(VisibilityTimeout=10) + + with freeze_time("2015-01-01 12:00:05"): + # no timeout yet + messages = queue.receive_messages(WaitTimeSeconds=0) + messages.should.have.length_of(0) + + with freeze_time("2015-01-01 12:00:15"): + # message is now available again, next one should be available + messages = queue.receive_messages() + messages.should.have.length_of(1) + messages[0].message_id.should.equal(message.message_id) + +@mock_sqs +def test_receive_message_for_queue_with_receive_message_wait_time_seconds_set(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + queue = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'ReceiveMessageWaitTimeSeconds': '2', + } + ) + + queue.receive_messages() diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 781727c26..77d439d83 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -3,8 +3,14 @@ from __future__ import unicode_literals import boto3 import botocore.exceptions import sure # noqa +import datetime +import uuid +import json -from moto import mock_ssm +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_ssm, mock_cloudformation @mock_ssm @@ -75,6 +81,46 @@ def test_get_parameters_by_path(): Value='value4', Type='String') + client.put_parameter( + Name='/baz/name1', + Description='A test parameter (list)', + Value='value1,value2,value3', + Type='StringList') + + client.put_parameter( + Name='/baz/name2', + Description='A test parameter', + Value='value1', + Type='String') + + client.put_parameter( + Name='/baz/pwd', + Description='A secure test parameter', + Value='my_secret', + Type='SecureString', + KeyId='alias/aws/ssm') + + client.put_parameter( + Name='foo', + Description='A test parameter', + Value='bar', + Type='String') + + client.put_parameter( + Name='baz', + Description='A test parameter', + Value='qux', + Type='String') + + response = client.get_parameters_by_path(Path='/', Recursive=False) + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['bar', 'qux']) + ) + + response = client.get_parameters_by_path(Path='/', Recursive=True) + len(response['Parameters']).should.equal(9) + response = client.get_parameters_by_path(Path='/foo') len(response['Parameters']).should.equal(2) {p['Value'] for p in response['Parameters']}.should.equal( @@ -91,17 +137,88 @@ def test_get_parameters_by_path(): set(['value3', 'value4']) ) + response = client.get_parameters_by_path(Path='/baz') + len(response['Parameters']).should.equal(3) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['StringList'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1']) + ) + + # note: 'Option' is optional (default: 'Equals') + filters = [{ + 'Key': 'Type', + 'Values': ['StringList'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['String'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + 
len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name2']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['String', 'SecureString'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(2) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name2', '/baz/pwd']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'BeginsWith', + 'Values': ['String'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(2) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1', '/baz/name2']) + ) + + filters = [{ + 'Key': 'KeyId', + 'Option': 'Equals', + 'Values': ['alias/aws/ssm'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/pwd']) + ) + @mock_ssm def test_put_parameter(): client = boto3.client('ssm', region_name='us-east-1') - client.put_parameter( + response = client.put_parameter( Name='test', Description='A test parameter', Value='value', Type='String') + response['Version'].should.equal(1) + response = client.get_parameters( Names=[ 'test' @@ -112,6 +229,65 @@ def test_put_parameter(): response['Parameters'][0]['Name'].should.equal('test') response['Parameters'][0]['Value'].should.equal('value') response['Parameters'][0]['Type'].should.equal('String') + response['Parameters'][0]['Version'].should.equal(1) + + try: + client.put_parameter( + Name='test', + Description='desc 2', + Value='value 2', + Type='String') + raise RuntimeError('Should fail') + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal('PutParameter') + err.response['Error']['Message'].should.equal('Parameter test already exists.') + + response = client.get_parameters( + Names=[ + 'test' + ], + WithDecryption=False) + + # without overwrite nothing change + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Name'].should.equal('test') + response['Parameters'][0]['Value'].should.equal('value') + response['Parameters'][0]['Type'].should.equal('String') + response['Parameters'][0]['Version'].should.equal(1) + + response = client.put_parameter( + Name='test', + Description='desc 3', + Value='value 3', + Type='String', + Overwrite=True) + + response['Version'].should.equal(2) + + response = client.get_parameters( + Names=[ + 'test' + ], + WithDecryption=False) + + # without overwrite nothing change + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Name'].should.equal('test') + response['Parameters'][0]['Value'].should.equal('value 3') + response['Parameters'][0]['Type'].should.equal('String') + response['Parameters'][0]['Version'].should.equal(2) + +@mock_ssm +def test_put_parameter_china(): + client = boto3.client('ssm', region_name='cn-north-1') + + response = client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + response['Version'].should.equal(1) @mock_ssm @@ -155,13 +331,15 @@ def test_describe_parameters(): Name='test', Description='A test parameter', Value='value', - Type='String') + Type='String', + AllowedPattern=r'.*') response = client.describe_parameters() len(response['Parameters']).should.equal(1) response['Parameters'][0]['Name'].should.equal('test') 
response['Parameters'][0]['Type'].should.equal('String') + response['Parameters'][0]['AllowedPattern'].should.equal(r'.*') @mock_ssm @@ -280,6 +458,35 @@ def test_describe_parameters_filter_keyid(): ''.should.equal(response.get('NextToken', '')) +@mock_ssm +def test_describe_parameters_attributes(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='aa', + Value='11', + Type='String', + Description='my description' + ) + + client.put_parameter( + Name='bb', + Value='22', + Type='String' + ) + + response = client.describe_parameters() + len(response['Parameters']).should.equal(2) + + response['Parameters'][0]['Description'].should.equal('my description') + response['Parameters'][0]['Version'].should.equal(1) + response['Parameters'][0]['LastModifiedDate'].should.be.a(datetime.date) + response['Parameters'][0]['LastModifiedUser'].should.equal('N/A') + + response['Parameters'][1].get('Description').should.be.none + response['Parameters'][1]['Version'].should.equal(1) + + @mock_ssm def test_get_parameter_invalid(): client = client = boto3.client('ssm', region_name='us-east-1') @@ -390,3 +597,204 @@ def test_add_remove_list_tags_for_resource(): ResourceType='Parameter' ) len(response['TagList']).should.equal(0) + + +@mock_ssm +def test_send_command(): + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + client = boto3.client('ssm', region_name='us-east-1') + # note the timeout is determined server side, so this is a simpler check. + before = datetime.datetime.now() + + response = client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref' + ) + cmd = response['Command'] + + cmd['CommandId'].should_not.be(None) + cmd['DocumentName'].should.equal(ssm_document) + cmd['Parameters'].should.equal(params) + + cmd['OutputS3Region'].should.equal('us-east-2') + cmd['OutputS3BucketName'].should.equal('the-bucket') + cmd['OutputS3KeyPrefix'].should.equal('pref') + + cmd['ExpiresAfter'].should.be.greater_than(before) + + # test sending a command without any optional parameters + response = client.send_command( + DocumentName=ssm_document) + + cmd = response['Command'] + + cmd['CommandId'].should_not.be(None) + cmd['DocumentName'].should.equal(ssm_document) + + +@mock_ssm +def test_list_commands(): + client = boto3.client('ssm', region_name='us-east-1') + + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + response = client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref') + + cmd = response['Command'] + cmd_id = cmd['CommandId'] + + # get the command by id + response = client.list_commands( + CommandId=cmd_id) + + cmds = response['Commands'] + len(cmds).should.equal(1) + cmds[0]['CommandId'].should.equal(cmd_id) + + # add another command with the same instance id to test listing by + # instance id + client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document) + + response = client.list_commands( + InstanceId='i-123456') + + cmds = response['Commands'] + len(cmds).should.equal(2) + + for cmd in cmds: + cmd['InstanceIds'].should.contain('i-123456') + + # test the error case for an invalid command id + with assert_raises(ClientError): + response = client.list_commands( + 
CommandId=str(uuid.uuid4())) + +@mock_ssm +def test_get_command_invocation(): + client = boto3.client('ssm', region_name='us-east-1') + + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + response = client.send_command( + InstanceIds=['i-123456', 'i-234567', 'i-345678'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref') + + cmd = response['Command'] + cmd_id = cmd['CommandId'] + + instance_id = 'i-345678' + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId=instance_id, + PluginName='aws:runShellScript') + + invocation_response['CommandId'].should.equal(cmd_id) + invocation_response['InstanceId'].should.equal(instance_id) + + # test the error case for an invalid instance id + with assert_raises(ClientError): + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId='i-FAKE') + + # test the error case for an invalid plugin name + with assert_raises(ClientError): + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId=instance_id, + PluginName='FAKE') + +@mock_ssm +@mock_cloudformation +def test_get_command_invocations_from_stack(): + stack_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Test Stack", + "Resources": { + "EC2Instance1": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-test-image-id", + "KeyName": "test", + "InstanceType": "t2.micro", + "Tags": [ + { + "Key": "Test Description", + "Value": "Test tag" + }, + { + "Key": "Test Name", + "Value": "Name tag for tests" + } + ] + } + } + }, + "Outputs": { + "test": { + "Description": "Test Output", + "Value": "Test output value", + "Export": { + "Name": "Test value to export" + } + }, + "PublicIP": { + "Value": "Test public ip" + } + } + } + + cloudformation_client = boto3.client( + 'cloudformation', + region_name='us-east-1') + + stack_template_str = json.dumps(stack_template) + + response = cloudformation_client.create_stack( + StackName='test_stack', + TemplateBody=stack_template_str, + Capabilities=('CAPABILITY_IAM', )) + + client = boto3.client('ssm', region_name='us-east-1') + + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + response = client.send_command( + Targets=[{ + 'Key': 'tag:aws:cloudformation:stack-name', + 'Values': ('test_stack', )}], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref') + + cmd = response['Command'] + cmd_id = cmd['CommandId'] + instance_ids = cmd['InstanceIds'] + + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId=instance_ids[0], + PluginName='aws:runShellScript') diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 4e0e52606..36c9da258 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -64,11 +64,46 @@ def test_assume_role(): credentials = role.credentials credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') - credentials.session_token.should.equal( - "BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") - credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") - 
credentials.secret_key.should.equal( - "aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + credentials.session_token.should.have.length_of(356) + assert credentials.session_token.startswith("FQoGZXIvYXdzE") + credentials.access_key.should.have.length_of(20) + assert credentials.access_key.startswith("ASIA") + credentials.secret_key.should.have.length_of(40) + + role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role") + role.user.assume_role_id.should.contain("session-name") + + +@freeze_time("2012-01-01 12:00:00") +@mock_sts_deprecated +def test_assume_role_with_web_identity(): + conn = boto.connect_sts() + + policy = json.dumps({ + "Statement": [ + { + "Sid": "Stmt13690092345534", + "Action": [ + "S3:ListBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::foobar-tester" + ] + }, + ] + }) + s3_role = "arn:aws:iam::123456789012:role/test-role" + role = conn.assume_role_with_web_identity( + s3_role, "session-name", policy, duration_seconds=123) + + credentials = role.credentials + credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') + credentials.session_token.should.have.length_of(356) + assert credentials.session_token.startswith("FQoGZXIvYXdzE") + credentials.access_key.should.have.length_of(20) + assert credentials.access_key.startswith("ASIA") + credentials.secret_key.should.have.length_of(40) role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role") role.user.assume_role_id.should.contain("session-name") diff --git a/tests/test_swf/models/test_activity_task.py b/tests/test_swf/models/test_activity_task.py index 5dddab975..41c88cafe 100644 --- a/tests/test_swf/models/test_activity_task.py +++ b/tests/test_swf/models/test_activity_task.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto.swf.exceptions import SWFWorkflowExecutionClosedError from moto.swf.models import ( diff --git a/tests/test_swf/models/test_domain.py b/tests/test_swf/models/test_domain.py index 57f66c830..1a8a1268d 100644 --- a/tests/test_swf/models/test_domain.py +++ b/tests/test_swf/models/test_domain.py @@ -1,4 +1,5 @@ from collections import namedtuple +import sure # noqa from moto.swf.exceptions import SWFUnknownResourceFault from moto.swf.models import Domain diff --git a/tests/test_swf/models/test_generic_type.py b/tests/test_swf/models/test_generic_type.py index d7410f395..294df9f84 100644 --- a/tests/test_swf/models/test_generic_type.py +++ b/tests/test_swf/models/test_generic_type.py @@ -1,4 +1,5 @@ from moto.swf.models import GenericType +import sure # noqa # Tests for GenericType (ActivityType, WorkflowType) diff --git a/tests/test_swf/models/test_history_event.py b/tests/test_swf/models/test_history_event.py index 43592aa6c..b869408ce 100644 --- a/tests/test_swf/models/test_history_event.py +++ b/tests/test_swf/models/test_history_event.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto.swf.models import HistoryEvent diff --git a/tests/test_swf/models/test_timeout.py b/tests/test_swf/models/test_timeout.py index d685bca8e..fb52652fd 100644 --- a/tests/test_swf/models/test_timeout.py +++ b/tests/test_swf/models/test_timeout.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto.swf.models import Timeout diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py index 3511d4e56..c0b8897b9 100644 --- a/tests/test_swf/responses/test_activity_tasks.py +++ b/tests/test_swf/responses/test_activity_tasks.py @@ -1,5 +1,6 @@ from 
boto.swf.exceptions import SWFResponseError from freezegun import freeze_time +import sure # noqa from moto import mock_swf_deprecated from moto.swf import swf_backend diff --git a/tests/test_swf/responses/test_activity_types.py b/tests/test_swf/responses/test_activity_types.py index b283d3448..95d8a3733 100644 --- a/tests/test_swf/responses/test_activity_types.py +++ b/tests/test_swf/responses/test_activity_types.py @@ -1,5 +1,6 @@ import boto from boto.swf.exceptions import SWFResponseError +import sure # noqa from moto import mock_swf_deprecated diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py index 466e1a2ae..972b1053b 100644 --- a/tests/test_swf/responses/test_decision_tasks.py +++ b/tests/test_swf/responses/test_decision_tasks.py @@ -1,5 +1,6 @@ from boto.swf.exceptions import SWFResponseError from freezegun import freeze_time +import sure # noqa from moto import mock_swf_deprecated from moto.swf import swf_backend diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py index 3fa12d665..8edc76432 100644 --- a/tests/test_swf/responses/test_domains.py +++ b/tests/test_swf/responses/test_domains.py @@ -1,5 +1,6 @@ import boto from boto.swf.exceptions import SWFResponseError +import sure # noqa from moto import mock_swf_deprecated diff --git a/tests/test_swf/responses/test_timeouts.py b/tests/test_swf/responses/test_timeouts.py index 5bd0ead96..f49c597a4 100644 --- a/tests/test_swf/responses/test_timeouts.py +++ b/tests/test_swf/responses/test_timeouts.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto import mock_swf_deprecated diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index 5c97c778b..88e3caa75 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -34,6 +34,20 @@ def test_start_workflow_execution(): "test-domain", "uid-abcd1234", "test-workflow", "v1.0") wf.should.contain("runId") +@mock_swf_deprecated +def test_signal_workflow_execution(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + wfe = conn.signal_workflow_execution( + "test-domain", "my_signal", "uid-abcd1234", "my_input", run_id) + + wfe = conn.describe_workflow_execution( + "test-domain", run_id, "uid-abcd1234") + + wfe["openCounts"]["openDecisionTasks"].should.equal(2) @mock_swf_deprecated def test_start_already_started_workflow_execution(): diff --git a/tests/test_swf/test_exceptions.py b/tests/test_swf/test_exceptions.py index a23a14e66..8617242b9 100644 --- a/tests/test_swf/test_exceptions.py +++ b/tests/test_swf/test_exceptions.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals +import sure # noqa import json diff --git a/tox.ini b/tox.ini index 0f3f1466a..570b5790f 100644 --- a/tox.ini +++ b/tox.ini @@ -1,7 +1,12 @@ [tox] -envlist = py27, py36 +envlist = py27, py36, py37 [testenv] +setenv = + BOTO_CONFIG=/dev/null + AWS_SECRET_ACCESS_KEY=foobar_secret + AWS_ACCESS_KEY_ID=foobar_key + AWS_DEFAULT_REGION=us-east-1 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/requirements-dev.txt diff --git a/update_version_from_git.py b/update_version_from_git.py new file mode 100644 index 000000000..d72dc4ae9 --- /dev/null +++ b/update_version_from_git.py @@ -0,0 +1,120 @@ +""" +Adapted from 
https://github.com/pygame/pygameweb/blob/master/pygameweb/builds/update_version_from_git.py
+
+For updating the version from git.
+__init__.py contains a __version__ field.
+Update that.
+If we are on master, we want to update the version as a pre-release.
+git describe --tags
+With these:
+    __init__.py
+        __version__ = '0.0.2'
+    git describe --tags
+        0.0.1-22-g729a5ae
+We want this:
+    __init__.py
+        __version__ = '0.0.2.dev22.g729a5ae'
+Get the branch/tag name with this:
+    git symbolic-ref -q --short HEAD || git describe --tags --exact-match
+"""
+
+import io
+import os
+import re
+import subprocess
+
+
+def migrate_source_attribute(attr, to_this, target_file, regex):
+    """Updates __magic__ attributes in the source file"""
+    change_this = re.compile(regex, re.S)
+    new_file = []
+    found = False
+
+    with open(target_file, 'r') as fp:
+        lines = fp.readlines()
+
+    for line in lines:
+        if line.startswith(attr):
+            found = True
+            line = re.sub(change_this, to_this, line)
+        new_file.append(line)
+
+    if found:
+        with open(target_file, 'w') as fp:
+            fp.writelines(new_file)
+
+def migrate_version(target_file, new_version):
+    """Updates __version__ in the source file"""
+    regex = r"['\"](.*)['\"]"
+    migrate_source_attribute('__version__', "'{new_version}'".format(new_version=new_version), target_file, regex)
+
+
+def is_master_branch():
+    cmd = 'git rev-parse --abbrev-ref HEAD'
+    tag_branch = subprocess.check_output(cmd, shell=True)
+    return tag_branch in [b'master\n']
+
+def git_tag_name():
+    cmd = 'git describe --tags'
+    tag_branch = subprocess.check_output(cmd, shell=True)
+    tag_branch = tag_branch.decode().strip()
+    return tag_branch
+
+def get_git_version_info():
+    cmd = 'git describe --tags'
+    ver_str = subprocess.check_output(cmd, shell=True)
+    ver, commits_since, githash = ver_str.decode().strip().split('-')
+    return ver, commits_since, githash
+
+def prerelease_version():
+    """ Return what the prerelease version should be.
+    https://packaging.python.org/tutorials/distributing-packages/#pre-release-versioning
+    0.0.2.dev22
+    """
+    ver, commits_since, githash = get_git_version_info()
+    initpy_ver = get_version()
+
+    assert len(initpy_ver.split('.')) in [3, 4], 'moto/__init__.py version should be like 0.0.2.dev'
+    assert initpy_ver > ver, 'the moto/__init__.py version should be newer than the last tagged release.'
+    return '{initpy_ver}.{commits_since}'.format(initpy_ver=initpy_ver, commits_since=commits_since)
+
+def read(*parts):
+    """ Reads in a file from *parts.
+    """
+    try:
+        return io.open(os.path.join(*parts), 'r', encoding='utf-8').read()
+    except IOError:
+        return ''
+
+def get_version():
+    """ Returns the version from moto/__init__.py
+    """
+    version_file = read('moto', '__init__.py')
+    version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]',
+                              version_file, re.MULTILINE)
+    if version_match:
+        return version_match.group(1)
+    raise RuntimeError('Unable to find version string.')
+
+
+def release_version_correct():
+    """Makes sure that:
+    - the prerelease version for master is correct.
+    - the release version is correct for tags.
+    """
+    if is_master_branch():
+        # update for a pre-release version.
+        initpy = os.path.abspath("moto/__init__.py")
+
+        new_version = prerelease_version()
+        print('updating version in __init__.py to {new_version}'.format(new_version=new_version))
+        assert len(new_version.split('.')) >= 4, 'moto/__init__.py version should be like 0.0.2.dev'
+        migrate_version(initpy, new_version)
+    else:
+        assert False, "No non-master deployments yet"
+        # check that we are a tag with the same version as in __init__.py
+        assert get_version() == git_tag_name(), 'git tag/branch name not the same as moto/__init__.py __version__'
+
+
+if __name__ == '__main__':
+    release_version_correct()
diff --git a/wait_for.py b/wait_for.py
index d313ea5a9..1f291c16b 100755
--- a/wait_for.py
+++ b/wait_for.py
@@ -12,8 +12,9 @@ except ImportError:
     # py3
     import urllib.request as urllib
     from urllib.error import URLError
+    import socket

-    EXCEPTIONS = (URLError, ConnectionResetError)
+    EXCEPTIONS = (URLError, socket.timeout, ConnectionResetError)

 start_ts = time.time()
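
For reference, here is a minimal sketch of the version transformation that the `update_version_from_git.py` docstring describes: combining the `__init__.py` version with the commit count from `git describe --tags` to build a pre-release identifier. The function name and example values below are illustrative only, not part of the diff; note also that the docstring's target format (`0.0.2.dev22.g729a5ae`) differs slightly from what `prerelease_version()` actually returns.

```python
def compose_prerelease(describe_output, initpy_version):
    # Illustrative helper (hypothetical name): mirrors prerelease_version().
    # 'git describe --tags' yields '<last-tag>-<commits-since>-g<short-hash>'.
    _last_tag, commits_since, _githash = describe_output.strip().split('-')
    # The script keeps the __init__.py version and appends the commit count,
    # after the same sanity check prerelease_version() performs.
    assert len(initpy_version.split('.')) in (3, 4)
    return '{v}.{n}'.format(v=initpy_version, n=commits_since)


# With __version__ = '0.0.2.dev' and describe output '0.0.1-22-g729a5ae',
# this prints '0.0.2.dev.22'.
print(compose_prerelease('0.0.1-22-g729a5ae', '0.0.2.dev'))
```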